leofilo commited on
Commit
855c6c2
·
verified ·
1 Parent(s): b0878aa

Upload 5 files

Browse files
Files changed (5) hide show
  1. Dockerfile +36 -0
  2. configuration.py +28 -0
  3. main.py +140 -0
  4. requirements.txt +19 -0
  5. task_maistro_production.py +448 -0
Dockerfile ADDED
@@ -0,0 +1,36 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Task Maistro — Dockerfile for Railway + Gradio (state persisted in Postgres)

FROM python:3.11-slim

# Predictable Python runtime behaviour and lean pip installs
ENV PYTHONUNBUFFERED=1 \
    PYTHONDONTWRITEBYTECODE=1 \
    PYTHONPATH=/app \
    PIP_NO_CACHE_DIR=1 \
    PIP_DISABLE_PIP_VERSION_CHECK=1

WORKDIR /app

# NOTE: the previous `apt-get install -y --no-install-recommends` listed no
# packages — a no-op layer — so the system-package step is removed entirely.

# Install dependencies first so Docker layer caching survives code-only changes
COPY requirements.txt .
RUN pip install --upgrade pip setuptools wheel \
    && pip install -r requirements.txt

# Application code
COPY main.py task_maistro_production.py configuration.py ./

# Run as a non-root user for security
RUN useradd -m -u 1000 appuser && chown -R appuser:appuser /app
USER appuser

# Port used by Gradio (main.py defaults to 8080)
EXPOSE 8080

# Run the app
CMD ["python", "main.py"]
configuration.py ADDED
@@ -0,0 +1,28 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from dataclasses import dataclass, field, fields
3
+ from typing import Any, Optional
4
+
5
+ from langchain_core.runnables import RunnableConfig
6
+ from dataclasses import dataclass
7
+
8
+ @dataclass(kw_only=True)
9
+ class Configuration:
10
+ """The configurable fields for the chatbot."""
11
+ user_id: str = "default-user"
12
+ todo_category: str = "general"
13
+ task_maistro_role: str = "You are a helpful task management assistant. You help you create, organize, and manage the user's ToDo list."
14
+
15
+ @classmethod
16
+ def from_runnable_config(
17
+ cls, config: Optional[RunnableConfig] = None
18
+ ) -> "Configuration":
19
+ """Create a Configuration instance from a RunnableConfig."""
20
+ configurable = (
21
+ config["configurable"] if config and "configurable" in config else {}
22
+ )
23
+ values: dict[str, Any] = {
24
+ f.name: os.environ.get(f.name.upper(), configurable.get(f.name))
25
+ for f in fields(cls)
26
+ if f.init
27
+ }
28
+ return cls(**{k: v for k, v in values.items() if v})
main.py ADDED
@@ -0,0 +1,140 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #!/usr/bin/env python3
2
+ """
3
+ Task Maistro Assistant - CLEAN DEPLOYMENT v3.0.0
4
+ Railway deployment with threading timeout fixes
5
+ """
6
+ import gradio as gr
7
+ import os
8
+ from dotenv import load_dotenv
9
+
10
+ # Load environment variables
11
+ load_dotenv()
12
+
13
+
14
+ from task_maistro_production import graph as compiled_graph
15
+ from langchain_core.messages import HumanMessage
16
+ import time
17
+
18
# The graph arrives pre-compiled (checkpointer + store already attached)
# from task_maistro_production.py; these prints are import-time sanity logs.
# NOTE(review): the 🚀 line says "in-memory backends" but the graph is
# compiled with Postgres persistence — log text kept as-is, verify intent.
print("✅ Graph imported successfully")
print(f"🔍 Graph type: {type(compiled_graph)}")
print("🚀 Using pre-compiled graph with stable in-memory backends...")
print("Graph ready!")
23
+
24
+
25
def chat_with_assistant(message, history):
    """Run one user turn through the LangGraph agent and return its reply.

    Args:
        message: the user's text input.
        history: Gradio chat history (unused here; conversation state is
            kept by the graph's checkpointer, keyed by thread_id).

    Returns:
        The assistant's reply text, or a human-readable error string.
    """
    try:
        # Check if OpenAI API key is available before invoking the graph
        openai_key = os.getenv("OPENAI_API_KEY")
        if not openai_key:
            return "Error: OPENAI_API_KEY no está configurada. Por favor, configura la variable de entorno."

        # BUGFIX: thread_id must live inside "configurable" — LangGraph
        # checkpointers read config["configurable"]["thread_id"]; a top-level
        # "thread_id" key is ignored, which broke conversation persistence.
        config = {
            "configurable": {
                "user_id": "default-user",
                "todo_category": "general",
                "task_maistro_role": "You are a helpful task management assistant. You help users create, organize, and manage the user's ToDo list.",
                "thread_id": "default-thread",
            }
        }

        # Wrap the raw text as a HumanMessage for the graph state
        input_message = {"messages": [HumanMessage(content=message)]}

        response = compiled_graph.invoke(input_message, config=config)

        # The final message in the returned state is the assistant's reply
        assistant_message = response["messages"][-1].content
        return assistant_message

    except Exception as e:
        # Surface any failure to the UI instead of crashing the app
        error_msg = f"Error: {str(e)}"
        print(f"Application error: {error_msg}")
        return error_msg
57
+
58
+
59
def clear_chat():
    """Reset the chatbot display to an empty conversation."""
    empty_history = []
    return empty_history
62
+
63
# Create the Gradio interface
# NOTE(review): the original indentation was lost in the diff; the Row/Column
# nesting below reconstructs the widget order — verify against the deployed
# layout.
with gr.Blocks(title="Task Maistro Assistant", theme=gr.themes.Soft()) as app:
    gr.Markdown("# 🤖 Task Maistro Assistant")
    # NOTE(review): this banner says Redis, but the graph imported from
    # task_maistro_production.py persists to Postgres — text kept as-is.
    gr.Markdown("""
    Tu asistente personal para gestionar tareas y recordatorios. Comparte tus tareas conmigo y te ayudaré a organizarlas.

    **🏗️ Arquitectura:**
    - 🔴 **Redis**: Estado de conversación (checkpointer) y datos persistentes (store)
    - 🧠 **LangGraph**: Motor de inteligencia artificial
    """)

    # Chat transcript display
    with gr.Row():
        with gr.Column(scale=4):
            chatbot = gr.Chatbot(
                height=500,
                placeholder="Hola! Soy tu asistente de tareas. ¿En qué puedo ayudarte hoy?",
                bubble_full_width=False
            )
    # Shared-secret gate: the key is re-checked on every send in respond()
    with gr.Row():
        access_key_input = gr.Textbox(
            label="Clave de acceso",
            placeholder="Ingresa la clave de acceso para usar el asistente",
            type="password",
        )

    # Message entry plus action buttons
    with gr.Row():
        msg = gr.Textbox(
            placeholder="Escribe tu mensaje aquí...",
            scale=4,
            container=False
        )
        send_btn = gr.Button("Enviar", variant="primary", scale=1)
        clear_btn = gr.Button("Limpiar", variant="secondary", scale=1)

    # Event handlers
    def respond(message, history, access_key):
        """Validate the access key, run the agent, and append to history.

        Returns (new_history, new_textbox_value) matching the two outputs
        bound below: [chatbot, msg].
        """
        # Compare against the ACCESS_KEY environment variable each turn
        if access_key != os.getenv("ACCESS_KEY"):
            # NOTE(review): the error string fills the *textbox* output slot,
            # so it appears inside the input field rather than in the chat.
            return history, "Clave de acceso incorrecta. Por favor, inténtalo de nuevo."

        # Ignore empty / whitespace-only submissions
        if message.strip() == "":
            return history, ""

        # Get response from assistant
        bot_response = chat_with_assistant(message, history)

        # Add to history ([user, assistant] pair per turn)
        history.append([message, bot_response])

        # Clear the textbox after a successful send
        return history, ""

    # Bind events: Enter key and the send button share one handler
    msg.submit(respond, [msg, chatbot, access_key_input], [chatbot, msg])
    send_btn.click(respond, [msg, chatbot, access_key_input], [chatbot, msg])
    clear_btn.click(clear_chat, None, chatbot)
118
+
119
+
120
if __name__ == "__main__":
    # Railway injects PORT; the default matches the Dockerfile's EXPOSE 8080.
    port = int(os.getenv("PORT", 8080))

    # Railway sets RAILWAY_ENVIRONMENT; bind to all interfaces only there,
    # otherwise stay on loopback for local development.
    in_railway = os.getenv("RAILWAY_ENVIRONMENT") is not None
    host = "0.0.0.0" if in_railway else "127.0.0.1"

    # Startup banner
    print(f"Starting application on port {port}")
    print(f"Environment: {'Production (Railway)' if in_railway else 'Development (Local)'}")
    print(f"🌐 Access the app at: http://{'0.0.0.0' if in_railway else 'localhost'}:{port}")

    app.launch(
        server_name=host,
        server_port=port,
        share=False,
        show_error=True,
        inbrowser=not in_railway,  # don't auto-open a browser in production
        quiet=in_railway,          # reduce logging noise in production
    )
requirements.txt ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Dependencias esenciales para producción Railway
2
+
3
+ gradio==4.44.0
4
+ python-dotenv==1.0.0
5
+ langchain-core>=0.3.0
6
+ langchain-openai>=0.2.8
7
+ langgraph==0.4.7
8
+ langgraph-prebuilt
9
+ langgraph-sdk
10
+
11
+
12
+
13
+ langgraph-checkpoint-postgres
14
+
15
+ trustcall==0.0.20
16
+ pydantic==2.9.2
17
+ httpx==0.27.2
18
+ typing-extensions==4.12.2
19
+ psycopg[binary]
task_maistro_production.py ADDED
@@ -0,0 +1,448 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ #%%
+ # ---------------------------------------------
+ # Task Maistro Assistant - Railway persistence
+ # Architecture: conversation state checkpointed in Postgres (PostgresSaver),
+ # long-term memories stored in Postgres (PostgresStore); no Redis is used
+ # ---------------------------------------------
7
+
8
+ import uuid
9
+ import os
10
+ from datetime import datetime
11
+ import json
12
+ from contextlib import ExitStack
13
+
14
+
15
+ # Core imports with error handling
16
+ from pydantic import BaseModel, Field
17
+ from trustcall import create_extractor
18
+ from typing import Literal, Optional, TypedDict
19
+ from langchain_core.runnables import RunnableConfig
20
+ from langchain_core.messages import merge_message_runs
21
+ from langchain_core.messages import SystemMessage, HumanMessage
22
+ from langchain_openai import ChatOpenAI
23
+ #from langgraph.checkpoint.memory import MemorySaver
24
+ # from langgraph.store.memory import InMemoryStore
25
+
26
+ from langgraph.store.base import BaseStore
27
+ from langgraph.checkpoint.postgres import PostgresSaver
28
+ from langgraph.store.postgres import PostgresStore
29
+
30
+
31
+ from langgraph.graph import StateGraph, MessagesState, START, END
32
+
33
+ import configuration
34
+
35
+ import os
36
+ from dotenv import load_dotenv
37
+
38
+ load_dotenv()
39
+
40
+ ## Schema definitions##
41
+ #%%
42
# User profile schema
class Profile(BaseModel):
    """This is the profile of the user you are chatting with"""
    # Every field is optional / defaults to empty, so an empty profile
    # validates and the extractor can fill it incrementally.
    name: Optional[str] = Field(description="The user's name", default=None)
    location: Optional[str] = Field(description="The user's location", default=None)
    job: Optional[str] = Field(description="The user's job", default=None)
    connections: list[str] = Field(
        description="Personal connection of the user, such as family members, friends, or coworkers",
        default_factory=list
    )
    interests: list[str] = Field(
        description="Interests that the user has",
        default_factory=list
    )
56
+
57
# ToDo schema
class ToDo(BaseModel):
    task: str = Field(description="The task to be completed.")
    # BUGFIX: Optional[...] without a default is still *required* in pydantic
    # v2, which made validation fail whenever the model omitted an estimate;
    # default=None added.
    time_to_complete: Optional[int] = Field(
        description="Estimated time to complete the task (minutes).",
        default=None,
    )
    deadline: Optional[datetime] = Field(
        description="When the task needs to be completed by (if applicable)",
        default=None,
    )
    solutions: list[str] = Field(
        description="List of specific, actionable solutions (e.g., specific ideas, service providers, or concrete options relevant to completing the task)",
        min_length=1,  # pydantic v2 name; `min_items` is the deprecated v1 alias
        default_factory=list,
    )
    status: Literal["not started", "in progress", "done", "archived"] = Field(
        description="Current status of the task",
        default="not started",
    )
74
+
75
+ ## Initialize the model and tools
76
+
77
# Update memory tool
class UpdateMemory(TypedDict):
    """ Decision on what memory type to update """
    # Routed by route_message(): 'user' -> update_profile,
    # 'todo' -> update_todos, 'instructions' -> update_instructions.
    # (Docstring above is the tool description sent to the LLM — unchanged.)
    update_type: Literal['user', 'todo', 'instructions']
81
+
82
+
83
# Initialize the model - lazy loading to ensure API key is available
def get_model():
    """Build the ChatOpenAI client shared by every graph node.

    ChatOpenAI reads OPENAI_API_KEY from the environment itself; this
    function only warns when it is missing so module import never crashes.
    Short timeout and few retries make Railway requests fail fast instead
    of hanging.
    """
    openai_key = os.getenv("OPENAI_API_KEY")
    if not openai_key:
        print("Warning: OPENAI_API_KEY is not set. OpenAI calls may fail.")
    # BUGFIX: the original passed both `timeout` and `request_timeout`;
    # `request_timeout` is the legacy alias of `timeout` in langchain-openai,
    # so passing both is redundant and can conflict — only `timeout` is kept.
    return ChatOpenAI(
        model="gpt-4o-mini",
        temperature=0,
        timeout=30,      # 30 second timeout for Railway
        max_retries=2,   # fewer retries for faster failure detection
    )
97
+
98
# Module-level singleton LLM client used by every node below
model = get_model()

## Create the Trustcall extractors for updating the user profile and ToDo list
# Extractor that creates/patches the single Profile document; the ToDo
# extractor is built per-call inside update_todos (it needs enable_inserts).
profile_extractor = create_extractor(
    model,
    tools=[Profile],
    tool_choice="Profile",
)
106
+
107
+ ## Prompts
108
+
109
# Chatbot instruction for choosing what to update and what tools to call.
# Placeholders filled by task_mAIstro(): {task_maistro_role}, {user_profile},
# {todo}, {instructions}. Typos in the original prompt are fixed below
# ("the your long-term memory", "Tell the user them when", "to user user").
MODEL_SYSTEM_MESSAGE = """{task_maistro_role}

You have a long term memory which keeps track of three things:
1. The user's profile (general information about them)
2. The user's ToDo list
3. General instructions for updating the ToDo list

Here is the current User Profile (may be empty if no information has been collected yet):
<user_profile>
{user_profile}
</user_profile>

Here is the current ToDo List (may be empty if no tasks have been added yet):
<todo>
{todo}
</todo>

Here are the current user-specified preferences for updating the ToDo list (may be empty if no preferences have been specified yet):
<instructions>
{instructions}
</instructions>

Here are your instructions for reasoning about the user's messages:

1. Reason carefully about the user's messages as presented below.

2. Decide whether any of your long-term memory should be updated:
- If personal information was provided about the user, update the user's profile by calling UpdateMemory tool with type `user`
- If tasks are mentioned, update the ToDo list by calling UpdateMemory tool with type `todo`
- If the user has specified preferences for how to update the ToDo list, update the instructions by calling UpdateMemory tool with type `instructions`

3. Tell the user that you have updated your memory, if appropriate:
- Do not tell the user you have updated the user's profile
- Tell the user when you update the todo list
- Do not tell the user that you have updated instructions

4. Err on the side of updating the todo list. No need to ask for explicit permission.

5. Respond naturally to the user after a tool call was made to save memories, or if no tool call was made."""
149
+
150
# Trustcall instruction — system prompt for the profile/todo extractors.
# {time} is filled with the current ISO timestamp at invocation.
# Typo fixed: "Reflect on following" -> "Reflect on the following".
TRUSTCALL_INSTRUCTION = """Reflect on the following interaction.

Use the provided tools to retain any necessary memories about the user.

Use parallel tool calling to handle updates and insertions simultaneously.

System Time: {time}"""
158
+
159
# Instructions for updating the ToDo list.
# Prompt used by update_instructions(): asks the model to rewrite the stored
# user preferences based on the latest conversation. {current_instructions}
# is filled with the previously saved instructions (may be empty).
CREATE_INSTRUCTIONS = """Reflect on the following interaction.

Based on this interaction, update your instructions for how to update ToDo list items. Use any feedback from the user to update how they like to have items added, etc.

Your current instructions are:

<current_instructions>
{current_instructions}
</current_instructions>"""
169
+
170
+
171
+ #########################################################################################################################################
172
+ ## Node definitions
173
+
174
def task_mAIstro(state: MessagesState, config: RunnableConfig, store: BaseStore):

    """Load memories from the store and use them to personalize the chatbot's response.

    Reads three namespaces from the BaseStore — ("profile" | "todo" |
    "instructions", todo_category, user_id) — renders them into
    MODEL_SYSTEM_MESSAGE, and invokes the model with the UpdateMemory tool
    bound (at most one tool call per turn).
    """

    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category
    task_maistro_role = configurable.task_maistro_role

    user_profile = None
    todo = ""
    instructions = ""

    # --- User profile -----------------------------------------------------
    namespace = ("profile", todo_category, user_id)
    memories = store.search(namespace)
    # Debug logging of the raw store contents
    print(f"Memories for namespace {namespace}:")
    print(memories)

    if memories:
        # Only one profile document is expected; take the first hit
        profile_data = memories[0].value
        if isinstance(profile_data, str):  # deserialize if stored as a JSON string
            profile_data = json.loads(profile_data)
        # Re-validate through the Profile schema and render as pretty JSON
        user_profile = Profile.model_validate(profile_data).model_dump_json(indent=2)
    else:
        user_profile = None

    # --- ToDo list ----------------------------------------------------------
    # Retrieve todo memories from the store
    namespace = ("todo", todo_category, user_id)
    memories = store.search(namespace)

    # One JSON document per line for the prompt
    todo_list_formatted = []
    if memories:
        for mem in memories:
            todo_data = mem.value
            if isinstance(todo_data, str):  # deserialize if stored as a JSON string
                todo_data = json.loads(todo_data)
            todo_list_formatted.append(json.dumps(todo_data))
    todo = "\n".join(todo_list_formatted)

    # --- Custom instructions ------------------------------------------------
    namespace = ("instructions", todo_category, user_id)
    memories = store.search(namespace)
    if memories:
        instructions_data = memories[0].value
        if isinstance(instructions_data, str):
            # Instructions may be stored as a plain string
            instructions = instructions_data
        else:  # if stored as JSON, render back to a string
            instructions = json.dumps(instructions_data)
    else:
        instructions = ""

    # Render the system prompt with everything loaded above
    system_msg = MODEL_SYSTEM_MESSAGE.format(task_maistro_role=task_maistro_role, user_profile=user_profile, todo=todo, instructions=instructions)

    # Respond using memory as well as the chat history; parallel_tool_calls=False
    # guarantees route_message() sees at most one UpdateMemory call
    response = model.bind_tools([UpdateMemory], parallel_tool_calls=False).invoke([SystemMessage(content=system_msg)]+state["messages"])

    return {"messages": [response]}
241
+
242
+ ########################################################################################################################################
243
+
244
def update_profile(state: MessagesState, config: RunnableConfig, store: BaseStore):

    """Reflect on the chat history and update the stored user profile.

    Runs the Trustcall profile_extractor over the conversation (minus the
    final tool-call message) and writes the resulting Profile document(s)
    back to the ("profile", todo_category, user_id) namespace. Returns the
    ToolMessage answering the UpdateMemory call made by task_mAIstro.
    """

    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category
    # Define the namespace for the memories
    namespace = ("profile", todo_category, user_id)

    # Retrieve the most recent memories for context
    existing_items = store.search(namespace)

    # Format the existing memories as (key, tool_name, doc) triples for the
    # Trustcall extractor; None when nothing is stored yet. Values stored as
    # JSON strings are deserialized first.
    tool_name = "Profile"
    existing_memories = ([(existing_item.key, tool_name, json.loads(existing_item.value) if isinstance(existing_item.value, str) else existing_item.value)
                          for existing_item in existing_items]
                         if existing_items
                         else None
                         )

    # Merge the chat history and the instruction; [:-1] drops the AI message
    # that carries the UpdateMemory tool call
    TRUSTCALL_INSTRUCTION_FORMATTED = TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
    updated_messages = list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))

    # Invoke the extractor
    result = profile_extractor.invoke({"messages": updated_messages,
                                       "existing": existing_memories})

    # Save the memories from Trustcall to the store: json_doc_id keys an
    # update to an existing document, otherwise a fresh UUID inserts a new one
    for r, rmeta in zip(result["responses"], result["response_metadata"]):
        profile_field = rmeta.get("json_doc_id", str(uuid.uuid4()))
        store.put(namespace, profile_field, r.model_dump(mode="json"))

    tool_calls = state['messages'][-1].tool_calls
    # Return tool message with update verification
    return {"messages": [{"role": "tool", "content": "updated profile", "tool_call_id": tool_calls[0]['id']}]}
286
+
287
+ ####################################################################################################################################
288
+
289
def update_todos(state: MessagesState, config: RunnableConfig, store: BaseStore):

    """Reflect on the chat history and update the stored ToDo list.

    Builds a Trustcall extractor over the ToDo schema (with inserts enabled
    so new tasks can be added, not just patched), runs it over the
    conversation, and writes the results back to the
    ("todo", todo_category, user_id) namespace.
    """

    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category

    # Define the namespace for the memories
    namespace = ("todo", todo_category, user_id)

    # Retrieve the most recent memories for context
    existing_items = store.search(namespace)

    # Format the existing memories as (key, tool_name, doc) triples for the
    # Trustcall extractor; None when nothing is stored yet
    tool_name = "ToDo"
    existing_memories = ([(existing_item.key, tool_name, json.loads(existing_item.value) if isinstance(existing_item.value, str) else existing_item.value)
                          for existing_item in existing_items]
                         if existing_items
                         else None
                         )

    # Merge the chat history and the instruction; [:-1] drops the AI message
    # that carries the UpdateMemory tool call
    TRUSTCALL_INSTRUCTION_FORMATTED = TRUSTCALL_INSTRUCTION.format(time=datetime.now().isoformat())
    updated_messages = list(merge_message_runs(messages=[SystemMessage(content=TRUSTCALL_INSTRUCTION_FORMATTED)] + state["messages"][:-1]))

    # Create the Trustcall extractor for updating the ToDo list;
    # enable_inserts=True lets it add new tasks as well as patch existing ones
    todo_extractor = create_extractor(
        model,
        tools=[ToDo],
        tool_choice=tool_name,
        enable_inserts=True
    )

    # Invoke the extractor
    result = todo_extractor.invoke({"messages": updated_messages,
                                    "existing": existing_memories})

    # Save the memories from Trustcall to the store: json_doc_id keys an
    # update to an existing document, otherwise a fresh UUID inserts a new one
    for r, rmeta in zip(result["responses"], result["response_metadata"]):
        todo_id = rmeta.get("json_doc_id", str(uuid.uuid4()))
        store.put(namespace, todo_id, r.model_dump(mode="json"))

    # Respond to the tool call made in task_mAIstro, confirming the update
    tool_calls = state['messages'][-1].tool_calls

    # NOTE(review): the confirmation never lists the changes Trustcall made —
    # the message below is a fixed header with no items appended.
    todo_update_msg = "Updated ToDo list:\n"
    return {"messages": [{"role": "tool", "content": todo_update_msg, "tool_call_id": tool_calls[0]['id']}]}
347
+
348
+ #########################################################################################################################################
349
def update_instructions(state: MessagesState, config: RunnableConfig, store: BaseStore):

    """Reflect on the chat history and update the stored user preferences.

    Loads the single "user_instructions" document, asks the model to rewrite
    it via CREATE_INSTRUCTIONS, and overwrites the store entry with the raw
    model output. Returns the ToolMessage answering the UpdateMemory call.
    """

    # Get the user ID from the config
    configurable = configuration.Configuration.from_runnable_config(config)
    user_id = configurable.user_id
    todo_category = configurable.todo_category

    namespace = ("instructions", todo_category, user_id)

    # Instructions live under a single fixed key (overwrite semantics)
    existing_memory_item = store.get(namespace, "user_instructions")
    existing_instructions = existing_memory_item.value if existing_memory_item else None
    if existing_instructions and isinstance(existing_instructions, str):
        try:
            # Deserialize if the value was saved as a JSON string
            existing_instructions = json.loads(existing_instructions)
        except json.JSONDecodeError:
            # Plain-text instructions are fine as-is
            pass

    # Format the memory in the system prompt; [:-1] drops the AI message that
    # carries the UpdateMemory tool call, and an explicit request is appended
    system_msg = CREATE_INSTRUCTIONS.format(current_instructions=existing_instructions if existing_instructions else "")
    new_memory = model.invoke([SystemMessage(content=system_msg)]+state['messages'][:-1] + [HumanMessage(content="Please update the instructions based on the conversation")])

    # Overwrite the existing memory in the store
    key = "user_instructions"
    store.put(namespace, key, new_memory.content)

    tool_calls = state['messages'][-1].tool_calls
    # Return tool message with update verification
    return {"messages": [{"role": "tool", "content": "updated instructions", "tool_call_id": tool_calls[0]['id']}]}
390
+
391
+
392
+ ###########################################################################################################################################
393
# Conditional edge
def route_message(state: "MessagesState", config: "RunnableConfig", store: "BaseStore"):
    """Route after task_mAIstro: END when no tool call, else the updater node.

    Reads the single UpdateMemory tool call emitted by task_mAIstro
    (parallel_tool_calls=False guarantees at most one) and maps its
    update_type to the matching graph node name.

    Raises:
        ValueError: if the tool call carries an unknown update_type.
    """
    message = state['messages'][-1]
    if not message.tool_calls:
        # Plain conversational reply — nothing to persist, finish the turn
        return END

    update_type = message.tool_calls[0]['args']['update_type']
    routes = {
        "user": "update_profile",
        "todo": "update_todos",
        "instructions": "update_instructions",
    }
    try:
        return routes[update_type]
    except KeyError:
        # The bare `raise ValueError` gave no context; include the bad value
        raise ValueError(f"Unknown update_type: {update_type!r}") from None
410
+
411
+ #######################################################################################################
412
+
413
# Create the graph + all nodes
builder = StateGraph(MessagesState, config_schema=configuration.Configuration)

# Nodes of the memory-extraction flow
builder.add_node(task_mAIstro)
builder.add_node(update_todos)
builder.add_node(update_profile)
builder.add_node(update_instructions)

# Edges: every updater hands control back to task_mAIstro so it can respond
builder.add_edge(START, "task_mAIstro")
builder.add_conditional_edges("task_mAIstro", route_message)
builder.add_edge("update_todos", "task_mAIstro")
builder.add_edge("update_profile", "task_mAIstro")
builder.add_edge("update_instructions", "task_mAIstro")

# Persistence: both the checkpointer (conversation state) and the store
# (profile/todo/instruction memories) live in Postgres.
POSTGRES_URI = os.getenv("POSTGRES_URI")
if not POSTGRES_URI:
    # Fail fast with a clear message instead of an opaque driver error
    # when the Railway Postgres add-on variable is missing.
    raise RuntimeError("POSTGRES_URI environment variable is not set")

# Both factories are context managers; the ExitStack keeps them open for the
# lifetime of the process (module-level singletons, intentionally never closed).
exit_stack = ExitStack()
checkpointer = exit_stack.enter_context(PostgresSaver.from_conn_string(POSTGRES_URI))
store = exit_stack.enter_context(PostgresStore.from_conn_string(POSTGRES_URI))
checkpointer.setup()  # create checkpoint tables if missing
store.setup()         # create store tables if missing

# Compile the graph
graph = builder.compile(checkpointer=checkpointer, store=store)

__all__ = ["graph"]
446
+
447
+
448
+ # %%