Update agent.py
agent.py CHANGED
@@ -1,60 +1,62 @@
-import asyncio
+# import asyncio
+import utils
+# import streamlit as st
 import tools
 from llama_index.core import Settings
 from llama_index.core.agent.workflow import AgentWorkflow

-# Silence the torch error
-def init_async():
-    import torch
-    torch.classes.__path__ = [] # add this line to manually set it to empty.
+# # Silence the torch error
+# def init_async():
+#     import torch
+#     torch.classes.__path__ = [] # add this line to manually set it to empty.


-def run_async_task(async_func, *args):
-    """
-    Run an asynchronous function in a new event loop.
+# def run_async_task(async_func, *args):
+#     """
+#     Run an asynchronous function in a new event loop.

-    Args:
-        async_func (coroutine): The asynchronous function to execute.
-        *args: Arguments to pass to the asynchronous function.
+#     Args:
+#         async_func (coroutine): The asynchronous function to execute.
+#         *args: Arguments to pass to the asynchronous function.

-    Returns:
-        None
-    """
+#     Returns:
+#         None
+#     """

-    loop = None
+#     loop = None

-    try:
-        loop = asyncio.new_event_loop()
+#     try:
+#         loop = asyncio.new_event_loop()

-        loop.run_until_complete(async_func(*args))
-    except:
-        # Close the existing loop if open
-        if loop is not None:
-            loop.close()
+#         loop.run_until_complete(async_func(*args))
+#     except:
+#         # Close the existing loop if open
+#         if loop is not None:
+#             loop.close()

-        # Create a new loop for retry
-        loop = asyncio.new_event_loop()
+#         # Create a new loop for retry
+#         loop = asyncio.new_event_loop()

-        loop.run_until_complete(async_func(*args))
-    finally:
-        if loop is not None:
-            loop.close()
+#         loop.run_until_complete(async_func(*args))
+#     finally:
+#         if loop is not None:
+#             loop.close()


 # Initialize model (defaults to llama3.1)
 def init_model(model:str='llama3.1:8b-instruct-q4_0'):
-
-    from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI
+    from llama_index.llms.ollama import Ollama
+    # from llama_index.llms.huggingface_api import HuggingFaceInferenceAPI

-
-
-
-
+    llm_model = Ollama(
+        model=model,
+        request_timeout=360.0
+    )

-    llm_model = HuggingFaceInferenceAPI(
-        model='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud/'
-        # model_name='Qwen/Qwen2.5-Coder-32B-Instruct'
-    )
+    # llm_model = HuggingFaceInferenceAPI(
+    #     model='https://pflgm2locj2t89co.us-east-1.aws.endpoints.huggingface.cloud/'
+    #     # model_name='Qwen/Qwen2.5-Coder-32B-Instruct'
+    # )

     return llm_model
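Note: this hunk retires the asyncio/Streamlit helpers (kept as comments) and swaps the LLM backend from a dedicated HuggingFaceInferenceAPI endpoint to a local Ollama server. A minimal sketch of the new init_model() path, assuming an Ollama server is running locally with the model already pulled:

# Sketch only: assumes `ollama pull llama3.1:8b-instruct-q4_0` was run and
# the Ollama server is listening on its default port.
from llama_index.llms.ollama import Ollama

llm = Ollama(
    model='llama3.1:8b-instruct-q4_0',  # same default as init_model()
    request_timeout=360.0,              # same generous timeout as the diff
)
print(llm.complete('Reply with the single word: pong'))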
@@ -96,10 +98,11 @@ def init_agent():
     agent = AgentWorkflow.from_tools_or_functions(
         tools_or_functions=tool_list,
         llm=llm_model,
-        system_prompt = "You are a useful AI assistant. Use all available tools to answer the inquiry. " \
-            "Before giving out the final answer, please verify using available tools." \
-            "When available, use the Wikipedia if it can be verifiable." \
-            "Multiple tools can combine to answer the question as a whole."
+        system_prompt="You are a general AI assistant. I will ask you a question. Report your thoughts, and finish your answer with the following template: FINAL ANSWER: [YOUR FINAL ANSWER]. YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma separated list of numbers and/or strings. If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise. If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise. If you are asked for a comma separated list, apply the above rules depending of whether the element to be put in the list is a number or a string."
+        # system_prompt = "You are a useful AI assistant. Use all available tools to answer the inquiry. " \
+        #     "Before giving out the final answer, please verify using available tools." \
+        #     "When available, use the Wikipedia if it can be verifiable." \
+        #     "Multiple tools can combine to answer the question as a whole."
     )

     return agent
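Note: this hunk switches the agent to a GAIA-style system prompt that forces a "FINAL ANSWER: ..." template, which the parsing in await_result() below relies on. For context, a hedged sketch of the AgentWorkflow wiring used here; the add() tool is a placeholder, not one of this repo's tools:

import asyncio
from llama_index.core.agent.workflow import AgentWorkflow
from llama_index.llms.ollama import Ollama

def add(a: int, b: int) -> int:
    """Add two integers."""  # the docstring doubles as the tool description
    return a + b

demo_agent = AgentWorkflow.from_tools_or_functions(
    tools_or_functions=[add],  # placeholder tool list
    llm=Ollama(model='llama3.1:8b-instruct-q4_0', request_timeout=360.0),
    system_prompt='Finish your answer with: FINAL ANSWER: [YOUR FINAL ANSWER].',
)

async def main():
    response = await demo_agent.run(user_msg='What is 2 + 3?')
    print(str(response))

asyncio.run(main())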
@@ -112,4 +115,14 @@ async def run_agent(agent,query:str):
 # Await function for async
 async def await_result(agent,query:str):
     response = await run_agent(agent,query)
-    return response.response.blocks[0].text
+    clean_response = utils.check_value(response.response.blocks[0].text[response.response.blocks[0].text.find(':')+2:])
+
+    # st.write(response.response.blocks[0].text)
+    # st.write(clean_response)
+    # st.write(response.raw)
+    # st.write(response.tool_calls)
+    # st.session_state.messages.append({'role':'assistant','content':response.response.blocks[0].text})
+    # st.session_state.messages.append({'role':'assistant','content':clean_response})
+
+    # return response.response.blocks[0].text
+    return clean_response
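Note: the rewritten await_result() slices off everything after the first colon in the model's text, which lines up with the "FINAL ANSWER: ..." template from the new system prompt, then routes it through utils.check_value() (repo-internal, not shown in this diff). A small illustration of the slicing, with check_value stubbed as a hypothetical cleanup:

# check_value() here is a hypothetical stand-in; the real one lives in utils.py.
def check_value(value: str) -> str:
    return value.strip()

text = 'FINAL ANSWER: 42'
clean = check_value(text[text.find(':') + 2:])
print(clean)  # -> '42'

One caveat worth noting: str.find(':') locates the first colon in the whole response, so if the model's reported thoughts contain a colon before the template, the slice will start too early.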