from langchain_ollama import ChatOllama
from langchain_together import ChatTogether
from langchain_core.messages import AIMessage, HumanMessage, SystemMessage
from langgraph.prebuilt import ToolNode
from langchain_core.output_parsers import JsonOutputParser
from langchain_core.prompts import PromptTemplate
from pydantic import BaseModel, Field

from src.state import AgentState
from src.tools import (
    calculator,
    wiki_search,
    web_search,
    reverse_string,
    tool_download_image,
    tool_read_files,
)


class AnswerTemplate(BaseModel):
    final_answer: str = Field(description="Final answer to the question")
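
# Illustrative note (assumption, not exercised by the active code path): the
# commented-out LLM-based validate_answer below feeds this schema to
# JsonOutputParser, which then expects model output shaped like
# {"final_answer": "..."}.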

tools = [
    calculator,
    wiki_search,
    # web_search,
    reverse_string,
    # tool_download_image,
    tool_read_files,
]


def get_tool_node(state: AgentState):
    """Return a ToolNode that runs any tool calls on the latest AI message."""
    return ToolNode(tools)


# Assistant node - generates responses
def assistant(state: AgentState):
    """Generate a response using the LLM."""
    # Model notes: the Llama variants below are fast but do not use tools.
    #   meta-llama/Llama-3.3-70B-Instruct-Turbo
    #   meta-llama/Llama-3-70B-Instruct-Turbo
    #   meta-llama/Meta-Llama-3-70B-Instruct-Turbo
    #   meta-llama/Llama-3-70b-chat-hf
    #   Qwen/Qwen2.5-72B-Instruct-Turbo
    #   Qwen/Qwen3-235B-A22B-Instruct-2507-tput
    # Local alternative via Ollama:
    # llm = ChatOllama(
    #     # model="llama3.2",
    #     model="qwen3",
    #     # model="qwen3:4b",
    #     temperature=0,
    #     num_ctx=16384,
    # )
    llm = ChatTogether(
        model="Qwen/QwQ-32B",
        max_tokens=None,
        temperature=0,
        timeout=None,
        max_retries=2,
        top_p=0.8,  # has little effect at temperature=0, where decoding is effectively greedy
        # truncation='auto',
    )
    # llm = ChatTogether(
    #     model="deepseek-ai/DeepSeek-R1-Distill-Qwen-14B",
    #     max_tokens=None,
    #     temperature=0,
    #     timeout=None,
    #     max_retries=4,
    # )
    # On the first turn, seed the conversation with the system prompt and the
    # question; on later turns, reuse the accumulated message history.
    messages = []
    init = False
    if len(state["messages"]) == 0:
        if len(state["file_name"]) == 0:
            human_message = f'{state["question"]}'
        else:
            human_message = f'{state["question"]} File: {state["file_name"]}'
        messages = [
            SystemMessage(content=state["system_message"]),
            HumanMessage(content=human_message),
        ]
        init = True
    for m in messages:
        m.pretty_print()

    # Bind tools to the LLM so it can emit tool calls.
    chat_with_tools = llm.bind_tools(tools)
    response = chat_with_tools.invoke(messages if init else state["messages"])
    messages.append(response)
    # print(response)
    messages[-1].pretty_print()
    # print(f"Assistant response: {response.content[:50]}...")
    return {
        "messages": messages,
        "last_ai_message": response.content,  # if state["messages"] and isinstance(state["messages"][-1], AIMessage) else None
    }
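
# Note (assumption): the return value above relies on AgentState declaring
# "messages" with LangGraph's add_messages reducer, so the new turn is
# appended to the existing history rather than replacing it.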

# Earlier LLM-based variant, kept for reference: it asked a local model to
# extract the final answer and emit it as JSON matching AnswerTemplate.
# def validate_answer(state: AgentState):
#     """Validate the final answer."""
#     llm = ChatOllama(
#         model="llama3.2",
#         # model="qwen3",
#         # model="qwen3:4b",
#         temperature=0,
#     )
#
#     def escape_braces(text):
#         return text.replace("{", "{{").replace("}", "}}")
#
#     query = (
#         "---\n\nYou are given a conversation between a human and an AI agent. "
#         "Identify the final answer provided by the agent. Then, format that "
#         "final answer according to the formatting rules described in the "
#         "system message, but do not alter the content of the answer itself. "
#         "Only apply formatting as instructed. Answer in JSON format."
#     )
#
#     # Set up a parser and inject its format instructions into the prompt.
#     # The parser converts the model's response into a JSON structure matching
#     # AnswerTemplate (a Pydantic model with a final_answer field).
#     # https://python.langchain.com/docs/how_to/output_parser_json/
#     parser = JsonOutputParser(pydantic_object=AnswerTemplate)
#     prompt = PromptTemplate(
#         template=(
#             f"SYSTEM MESSAGE: {state['system_message']}\n\n"
#             f"HUMAN QUERY: {escape_braces(state['question'])}\n\n"
#             f"AGENT ANSWER: {escape_braces(state['last_ai_message'])}\n\n"
#             f"{query}\n\n"
#             "{format_instructions}"
#         ),
#         input_variables=["query"],
#         partial_variables={"format_instructions": parser.get_format_instructions()},
#     )
#     # print(prompt)
#     chain = prompt | llm | parser
#     # final_answer = chain.invoke(
#     #     {"format_instructions": parser.get_format_instructions()}
#     # )
#     final_answer = chain.invoke({"query": query})
#     print(final_answer)
#     final_answer = final_answer["final_answer"]
#     # logger.info(f"Final answer: {final_answer}")
#     return {"final_answer": final_answer}


def validate_answer(state: AgentState):
    """Extract the final answer from the last AI message."""
    pattern = "FINAL ANSWER: "
    i = state["last_ai_message"].find(pattern)
    # find() returns -1 when the marker is missing; fall back to the whole message.
    if i == -1:
        final_answer = state["last_ai_message"]
    else:
        final_answer = state["last_ai_message"][i + len(pattern):]
    print(final_answer)
    return {"final_answer": final_answer}


def ready_to_answer(state: AgentState):
    """Route to answer validation once the agent signals it is done."""
    if state["ready_to_answer"]:
        return "validate_answer"
    else:
        return "assistant"