from langchain_community.chat_models import ChatOllama
from langchain.agents import AgentExecutor
from langchain.memory import ChatMessageHistory

from agents.functions_agent.base import create_functions_agent
from functions import tools, get_openai_tools
from prompts.prompt import rag_agent_prompt
from config import config
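
# Local chat model served by Ollama; the model name comes from config.
# temperature=0.55 keeps answers mostly focused while allowing some variety.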
llm = ChatOllama(model=config.ollama_model, temperature=0.55)
tools_dict = get_openai_tools()
history = ChatMessageHistory()
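
# Assemble the functions agent from the RAG prompt, then wrap it in an
# AgentExecutor so tool calls are actually dispatched at runtime.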
functions_agent = create_functions_agent(llm=llm, prompt=rag_agent_prompt)
functions_agent_executor = AgentExecutor(
    agent=functions_agent,
    tools=tools,
    verbose=True,
    return_intermediate_steps=True,
)
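
# Minimal chat REPL: type "/bye" to exit.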
if __name__ == "__main__":
    while True:
        try:
            inp = input("User: ")
            if inp == "/bye":
                break
            # Pass the message list itself; a MessagesPlaceholder for
            # "chat_history" expects a list of messages, not the history object.
            response = functions_agent_executor.invoke(
                {"input": inp, "chat_history": history.messages, "tools": tools_dict}
            )
            # Strip the ChatML end-of-turn token some Ollama models emit verbatim.
            response["output"] = response["output"].replace("<|im_end|>", "")
            history.add_user_message(inp)
            history.add_ai_message(response["output"])
            print(response["output"])
        except Exception as e:
            # Log the error and keep the REPL alive.
            print(e)
            continue