# final_assessment / agent.py
# staedi's picture
# Update agent.py
# 6701381 verified
# import asyncio
import utils
# import streamlit as st
import tools
from llama_index.core import Settings
from llama_index.core.agent.workflow import AgentWorkflow
# # Silence the torch error
# def init_async():
# import torch
# torch.classes.__path__ = [] # add this line to manually set it to empty.
# def run_async_task(async_func, *args):
# """
# Run an asynchronous function in a new event loop.
# Args:
# async_func (coroutine): The asynchronous function to execute.
# *args: Arguments to pass to the asynchronous function.
# Returns:
# None
# """
# loop = None
# try:
# loop = asyncio.new_event_loop()
# loop.run_until_complete(async_func(*args))
# except:
# # Close the existing loop if open
# if loop is not None:
# loop.close()
# # Create a new loop for retry
# loop = asyncio.new_event_loop()
# loop.run_until_complete(async_func(*args))
# finally:
# if loop is not None:
# loop.close()
# Initialize model (remote HF inference endpoint; Ollama path kept for reference)
def init_model(model:str='llama3.1:8b-instruct-q4_0',
               endpoint:str='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud/'):
    """Build and return the LLM backend used by the agent.

    Args:
        model: Ollama model tag. Kept for backward compatibility, but currently
            unused because the Ollama backend below is commented out.
        endpoint: Hugging Face inference endpoint URL; passed to
            HuggingFaceInferenceAPI as its ``model`` argument. Defaults to the
            previously hard-coded endpoint, so existing callers are unaffected.

    Returns:
        A configured ``HuggingFaceInferenceAPI`` instance.
    """
    # from llama_index.llms.ollama import Ollama
    from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
    # llm_model = Ollama(
    #     model=model,
    #     request_timeout=360.0
    # )
    llm_model = HuggingFaceInferenceAPI(
        model=endpoint
        # model_name='Qwen/Qwen2.5-Coder-32B-Instruct'
    )
    return llm_model
# Embedding model used by the vector store (HuggingFace, defaults to bge-small)
def init_embed_model(model:str='BAAI/bge-small-en-v1.5'):
    """Return a HuggingFace embedding model for the given model name."""
    # Imported lazily so the heavy dependency loads only when needed.
    from llama_index.embeddings.huggingface import HuggingFaceEmbedding
    return HuggingFaceEmbedding(model_name=model)
# Assemble the tools handed to the agent
def create_tool():
    """Return the flat list of tools the agent may call.

    Combines the four arithmetic helpers, the search tool, and every entry
    from ``tools.wiki_tool`` (assumed iterable, as the original extend implies).
    """
    arithmetic = [tools.add, tools.divide, tools.multiply, tools.subtract]
    return [*arithmetic, tools.search_tool, *tools.wiki_tool]
# Build the agent: model + embeddings + tools wired into one workflow
def init_agent():
    """Create and return a fully configured AgentWorkflow.

    Registers the LLM and embedding model on the global ``Settings`` first
    (this prevents llama_index from falling back to its OpenAI defaults),
    then constructs the workflow from the tool list.
    """
    llm_model = init_model()
    # Override the library-wide defaults so no OpenAI key is required.
    Settings.llm = llm_model
    Settings.embed_model = init_embed_model()
    return AgentWorkflow.from_tools_or_functions(
        tools_or_functions=create_tool(),
        llm=llm_model,
        system_prompt="You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string.",
    )
# Dispatch one query to the agent
async def run_agent(agent,query:str):
    """Await the agent's ``run`` coroutine for *query* and return its result."""
    return await agent.run(query)
# Async entry point: run the query and post-process the reply
async def await_result(agent,query:str):
    """Run *query* through the agent and return the cleaned final answer.

    The agent's system prompt ends replies with ``FINAL ANSWER: ...``; this
    keeps only the text after the first ``':'`` (plus the following space)
    and normalizes it via ``utils.check_value``.
    """
    result = await run_agent(agent,query)
    raw_text = result.response.blocks[0].text
    # Slice off everything up to and including ": " after the first colon.
    answer = raw_text[raw_text.find(':') + 2:]
    return utils.check_value(answer)