Spaces:
Sleeping
Sleeping
Luigi D'Addona
committed on
Commit
·
260eec5
1
Parent(s):
7cfe271
aggiunte istruzioni generali per l'agent
Browse files
agent.py
CHANGED
|
@@ -25,6 +25,13 @@ GEMINI_TEMPERATURE = float(os.environ.get("GEMINI_TEMPERATURE"))
|
|
| 25 |
|
| 26 |
TOOLS_CALL_DELAY = 1.5
|
| 27 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 28 |
#
|
| 29 |
# Inizializza il modello e gli associa i tool
|
| 30 |
#
|
|
@@ -90,13 +97,23 @@ def call_model( state: AgentState, config: RunnableConfig):
|
|
| 90 |
# Invoke the model with the system prompt and the messages
|
| 91 |
#response = chat_with_tools.invoke(state["messages"], config)
|
| 92 |
|
| 93 |
-
# Modo 2)
|
| 94 |
# Create a copy to avoid modifying the original state and append instruction to the end
|
| 95 |
-
messages = state["messages"][:]
|
| 96 |
-
messages.append(
|
| 97 |
-
|
| 98 |
-
) # Append instruction to the end
|
| 99 |
-
response = chat_with_tools.invoke(messages, config)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 100 |
|
| 101 |
# We return a list, because this will get added to the existing messages state using the add_messages reducer
|
| 102 |
return {"messages": [response]}
|
|
|
|
| 25 |
|
| 26 |
TOOLS_CALL_DELAY = 1.5
|
| 27 |
|
| 28 |
+
GENERAL_AGENT_INSTRUCTIONS = """You are a general AI assistant. I will ask you a question.
|
| 29 |
+
Your answer should be a number OR as few words as possible OR a comma separated list of numbers and/or strings.
|
| 30 |
+
If you are asked for a number, don't use comma to write your number neither use units such as $ or percent sign unless specified otherwise.
|
| 31 |
+
If you are asked for a string, don't use articles, neither abbreviations (e.g. for cities), and write the digits in plain text unless specified otherwise.
|
| 32 |
+
If you are asked for a comma separated list, apply the above rules depending on whether the element to be put in the list is a number or a string.
|
| 33 |
+
Provide only the answer, without notes, explanations or comments."""
|
| 34 |
+
|
| 35 |
#
|
| 36 |
# Inizializza il modello e gli associa i tool
|
| 37 |
#
|
|
|
|
| 97 |
# Invoke the model with the system prompt and the messages
|
| 98 |
#response = chat_with_tools.invoke(state["messages"], config)
|
| 99 |
|
| 100 |
+
# Modo 2) - aggiunge in fondo alcune istruzioni
|
| 101 |
# Create a copy to avoid modifying the original state and append instruction to the end
|
| 102 |
+
# messages = state["messages"][:]
|
| 103 |
+
# messages.append(
|
| 104 |
+
# HumanMessage(content="Provide only the answer, without explanations or comments.")
|
| 105 |
+
# ) # Append instruction to the end
|
| 106 |
+
# response = chat_with_tools.invoke(messages, config)
|
| 107 |
+
|
| 108 |
+
# Modo 3)
|
| 109 |
+
# Create a new list for messages to send to the LLM
|
| 110 |
+
# Start with the general instructions
|
| 111 |
+
messages_to_send = [HumanMessage(content=GENERAL_AGENT_INSTRUCTIONS)]
|
| 112 |
+
|
| 113 |
+
# Append all existing messages from the agent state
|
| 114 |
+
messages_to_send.extend(state["messages"])
|
| 115 |
+
|
| 116 |
+
response = chat_with_tools.invoke(messages_to_send)
|
| 117 |
|
| 118 |
# We return a list, because this will get added to the existing messages state using the add_messages reducer
|
| 119 |
return {"messages": [response]}
|