dmpantiu committed
Commit 754cec9 · unverified · 1 Parent(s): 27b66c3

Upload 22 files

.gitattributes CHANGED
@@ -33,6 +33,8 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ img/11111111.png filter=lfs diff=lfs merge=lfs -text
37
+ img/2222222.png filter=lfs diff=lfs merge=lfs -text
38
  *.nc filter=lfs diff=lfs merge=lfs -text
39
  *.sqlite3 filter=lfs diff=lfs merge=lfs -text
40
  *.shp filter=lfs diff=lfs merge=lfs -text
img/11111111.png ADDED

Git LFS Details

  • SHA256: 88aa00205199861ce8ae0432d974e85aa7faf612bf4e4cc5c8b2f20f99360fe0
  • Pointer size: 131 Bytes
  • Size of remote file: 493 kB
img/2222222.png ADDED

Git LFS Details

  • SHA256: ce396909073602be7d77ca935768cbc6ff566fe2f4e5bd18ecf94c3338131ff9
  • Pointer size: 131 Bytes
  • Size of remote file: 450 kB
img/pangaea-logo.png ADDED
src/.DS_Store ADDED
Binary file (6.15 kB).
 
src/__init__.py ADDED
File without changes
src/agents.py ADDED
@@ -0,0 +1,1097 @@
1
+ # src/agents.py
2
+ import base64
3
+ import os
4
+ import logging
5
+ import functools
6
+ import subprocess
7
+ import sys
8
+ from io import StringIO
9
+ from typing import List, Annotated, Sequence, TypedDict
10
+ import operator
11
+ from langchain.agents import AgentExecutor, create_openai_tools_agent
12
+ from langchain_core.prompts import ChatPromptTemplate, MessagesPlaceholder
13
+ from langchain_core.output_parsers.openai_functions import JsonOutputFunctionsParser
14
+ from langchain_core.messages import BaseMessage, HumanMessage
15
+ from langchain_experimental.tools import PythonREPLTool
16
+ from langchain_openai import ChatOpenAI, OpenAI
17
+ from langchain_core.messages import AIMessage
18
+ from langchain_experimental.agents.agent_toolkits.pandas.base import create_pandas_dataframe_agent
19
+ from pydantic import BaseModel, Field, PrivateAttr
20
+ from langchain_core.tools import StructuredTool
21
+ from langchain.agents.format_scratchpad.openai_tools import (
22
+ format_to_openai_tool_messages,
23
+ )
24
+ from langchain.agents.output_parsers.openai_tools import OpenAIToolsAgentOutputParser
25
+ from langgraph.graph import StateGraph, END
26
+ from langchain.agents.agent_types import AgentType
27
+
28
+
29
+ from langchain_openai import OpenAIEmbeddings
30
+ from langchain_community.vectorstores import Chroma
31
+
32
+ from typing import Any
33
+ import streamlit as st
34
+ import pandas as pd
35
+ import matplotlib.pyplot as plt
36
+ from openai import OpenAI
37
+
38
+
39
+ #sys.path
40
+ # Get the absolute path of the current file (agents.py)
41
+ current_dir = os.path.dirname(os.path.abspath(__file__))
42
+
43
+ # Get the parent directory
44
+ parent_dir = os.path.abspath(os.path.join(current_dir, '..'))
45
+
46
+ # Add the parent directory to sys.path
47
+ if parent_dir not in sys.path:
48
+ sys.path.insert(0, parent_dir)
49
+
50
+
51
+ # Import custom modules
52
+ from .search.search_pg_default import pg_search_default
53
+ from .search.publication_qa_tool import answer_publication_questions, PublicationQAArgs
54
+ from .plotting_tools.hard_agent import plot_master_track_map
55
+ from .plotting_tools.oceanographer_tools import plot_ts_diagram
56
+ from .prompts import Prompts
57
+ from .utils import generate_unique_image_path
58
+ from .config import API_KEY
59
+
60
+
61
+ # 1. Search Agent and Tools
62
+ class CustomPythonREPLTool(PythonREPLTool):
63
+ _datasets: dict = PrivateAttr()
64
+
65
+ def __init__(self, datasets, **kwargs):
66
+ """
67
+ Custom Python REPL tool that injects dataset variables and logs plot generation.
68
+ :param datasets: Dictionary { "dataset_1": <DataFrame>, "dataset_2": <DataFrame>, ... }
69
+ """
70
+ super().__init__(**kwargs)
71
+ self._datasets = datasets
72
+
73
+ def _run(self, query: str, **kwargs) -> Any:
74
+ """
75
+ Execute the user-provided Python code in a local context containing:
76
+ - st (Streamlit)
77
+ - plt (Matplotlib Pyplot)
78
+ - pd (Pandas)
79
+ - All loaded dataset variables (self._datasets)
80
+ - A dynamically generated plot_path
81
+
82
+ If a figure is saved to plot_path, a "plot_generated" event will be logged in session_state["execution_history"].
83
+ """
84
+ import streamlit as st
85
+ import matplotlib.pyplot as plt
86
+ import pandas as pd
87
+ import logging
88
+ from io import StringIO
89
+ from src.utils import log_history_event, generate_unique_image_path
90
+
91
+ # Prepare local context with necessary packages
92
+ local_context = {"st": st, "plt": plt, "pd": pd}
93
+
94
+ # Inject the user’s datasets under the specified variable names (e.g. dataset_1, dataset_2, etc.)
95
+ local_context.update(self._datasets)
96
+
97
+ # Generate a unique file path for the plot (plot_path)
98
+ plot_path = generate_unique_image_path()
99
+ local_context['plot_path'] = plot_path
100
+
101
+ # Redirect stdout so we can capture any output from exec(...)
102
+ old_stdout = sys.stdout
103
+ redirected_output = StringIO()
104
+ sys.stdout = redirected_output
105
+
106
+ try:
107
+ # Execute user code
108
+ exec(query, local_context)
109
+ output = redirected_output.getvalue()
110
+
111
+ except ModuleNotFoundError as e:
112
+ missing_module = e.name
113
+ logging.warning(f"Module '{missing_module}' not found during code execution.")
114
+ return {
115
+ "error": "ModuleNotFoundError",
116
+ "missing_module": missing_module,
117
+ "message": f"The Python module '{missing_module}' is not installed."
118
+ }
119
+ except Exception as e:
120
+ logging.error(f"Error during code execution: {e}")
121
+ return {
122
+ "error": "ExecutionError",
123
+ "message": str(e)
124
+ }
125
+ finally:
126
+ # Restore stdout
127
+ sys.stdout = old_stdout
128
+
129
+ # Check if a plot was actually saved to plot_path
130
+ plot_generated = False
131
+ if os.path.exists(plot_path):
132
+ st.session_state.saved_plot_path = plot_path
133
+ st.session_state.plot_image = plot_path
134
+ st.session_state.new_plot_path = plot_path
135
+ plot_generated = True
136
+
137
+ if plot_generated:
138
+ status_message = f"Plot generated = True. Saved at: {plot_path}"
139
+ logging.info(status_message)
140
+ st.session_state.plot_generated_status = status_message
141
+
142
+ from src.utils import log_history_event
143
+ log_history_event(
144
+ st.session_state,
145
+ "plot_generated",
146
+ {
147
+ "plot_path": plot_path.replace("sandbox:", ""), # Remove sandbox prefix
148
+ "agent": "VisualizationAgent",
149
+ "description": "Python_REPL Generated Plot",
150
+ "content": query # Store the actual code used
151
+ }
152
+ )
153
+
154
+ return {
155
+ "result": f"Execution completed. Plot saved at: {plot_path if plot_generated else 'No plot generated'}",
156
+ "output": output,
157
+ "plot_images": [plot_path] if plot_generated else []
158
+ }
159
+
160
+
161
+
162
+ def search_pg_datasets_tool(query):
163
+ global prompt_search
164
+
165
+ datasets_info = pg_search_default(query)
166
+
167
+ logging.debug("Datasets info: %s", datasets_info)
168
+
169
+ if not datasets_info.empty:
170
+ st.session_state.datasets_info = datasets_info
171
+ st.session_state.messages_search.append({
172
+ "role": "assistant",
173
+ "content": f"**Search query:** {query}"
174
+ })
175
+ # Pass the table as JSON (you can use orient="split" or the default, as long as your UI can parse it)
176
+ st.session_state.messages_search.append({
177
+ "role": "assistant",
178
+ "content": "**Datasets Information:**",
179
+ "table": datasets_info.to_json(orient="split")
180
+ })
181
+
182
+ # Optionally, build a detailed description string for the prompt:
183
+ datasets_description = ""
184
+ for i, row in datasets_info.iterrows():
185
+ datasets_description += (
186
+ f"Dataset {i + 1}:\n"
187
+ f"Name: {row['Name']}\n"
188
+ f"Description: {row['Short Description']}\n"
189
+ f"Parameters: {row['Parameters']}\n\n"
190
+ )
191
+
192
+ prompt_search = (
193
+ f"The user has provided the following query: {query}\n"
194
+ f"Available datasets:\n{datasets_description}\n"
195
+ "Please identify the top two datasets that best match the user's query and explain why they are the most relevant. "
196
+ "Do not suggest datasets without values in the Parameters field, because they cannot be directly downloaded.\n"
197
+ "Respond using the following schema:\n"
198
+ "{dataset name}\n{reason why relevant}\n{propose some short analysis and further questions to answer}"
199
+ )
200
+
201
+ return datasets_info, prompt_search
202
+
203
+
204
+ def create_search_agent(datasets_info=None):
205
+ model_name = st.session_state.get("model_name", "gpt-3.5-turbo")
206
+ if model_name == "o3-mini":
207
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
208
+ else:
209
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
210
+
211
+ # Generate dataset description string
212
+ datasets_description = ""
213
+ if datasets_info is not None:
214
+ for i, row in datasets_info.iterrows():
215
+ datasets_description += f"Dataset {i + 1}:\nName: {row['Name']}\nDOI: {row['DOI']}\nDescription: {row['Short Description']}\nParameters: {row['Parameters']}\n\n"
216
+
217
+ prompt = ChatPromptTemplate.from_messages(
218
+ [
219
+ ("system",
220
+ f"You are a powerful assistant primarily designed to search and retrieve datasets from PANGAEA. Your main goal is to help users find relevant datasets using the search_pg_datasets tool. When a user asks about datasets, always use this tool first to provide the most up-to-date and accurate information.\n\n"
221
+ #f"Here are some datasets returned from the search:\n{datasets_description}"
222
+ "In addition to dataset searches, you have a secondary capability to answer questions about publications related to specific datasets (or in other words what was published based on this dataset). If a user explicitly asks about publications or research findings based on a particular dataset, you can use the answer_publication_questions tool. For example, you can handle queries like 'What was published based on this dataset?' or 'What were the main conclusions from the research using this dataset?'\n\n"
223
+ "Remember:\n"
224
+ "1. Prioritize dataset searches using the search_pg_datasets tool. Make sure that the query you pass to the tool is rephrased so that elastic search gives the best match. Also try not to include words like 'search' and etc. in the search query.\n"
225
+ "2. Only use the answer_publication_questions tool when the user specifically asks about publications or research findings related to a dataset they've already identified. Please make sure that you correctly pass the doi to the tool. It should be doi retrieved after the search (user will point out which dataset it interested in). DO NOT GENERATE DOI ON THIS STEP OUT OF YOUR MIND! JUST TAKE WHAT WAS GIVEN WITH SYSTEM PROMPT.\n"
226
+ "3. If needed, ask the user to clarify which dataset they're referring to before using the publication tool.\n\n"
227
+ "Strive to provide accurate, helpful, and concise responses to user queries."
228
+ ),
229
+ ("user", "{input}"),
230
+ MessagesPlaceholder(variable_name="chat_history"),
231
+ MessagesPlaceholder(variable_name="agent_scratchpad"),
232
+ ]
233
+ )
234
+
235
+ search_tool = StructuredTool.from_function(
236
+ func=search_pg_datasets_tool,
237
+ name="search_pg_datasets",
238
+ description="List datasets from PANGAEA based on a query"
239
+ )
240
+
241
+ publication_qa_tool = StructuredTool.from_function(
242
+ func=answer_publication_questions,
243
+ name="answer_publication_questions",
244
+ description="A tool to answer questions about articles published from this dataset. This will be a journal article for which you should provide the tool with an already structured question about what the user wants. The input should be the DOI of the dataset (e.g. 'https://doi.org/10.1594/PANGAEA.xxxxxx') and the question. The question should be reworded to specifically send it to RAG. E.g. the hypothetical user's question 'Are there any related articles to the first dataset? If so what these articles are about?' will be re-worded for this tool as 'What is this article is about?'",
245
+ args_schema=PublicationQAArgs
246
+ )
247
+
248
+ tools = [search_tool, publication_qa_tool]
249
+
250
+ llm_with_tools = llm.bind_tools(tools)
251
+
252
+ agent = (
253
+ {
254
+ "input": lambda x: x["input"],
255
+ "chat_history": lambda x: x.get("chat_history", []),
256
+ "agent_scratchpad": lambda x: format_to_openai_tool_messages(x["intermediate_steps"]),
257
+ }
258
+ | prompt
259
+ | llm_with_tools
260
+ | OpenAIToolsAgentOutputParser()
261
+ )
262
+
263
+ return AgentExecutor(agent=agent, tools=tools, verbose=True, max_iterations=5)
264
+
265
+ # 2. Visualization and Oceanography Tools
266
+
267
+ class PlotMasterTrackMapArgs(BaseModel):
268
+ dataset_var: str = Field(description="The variable name of the dataset to use (e.g., 'dataset_1', 'dataset_2').")
269
+ main_title: str = Field(description="The main title for the plot.")
270
+ lat_col: str = Field(description="Name of the latitude column.")
271
+ lon_col: str = Field(description="Name of the longitude column.")
272
+ date_col: str = Field(description="Name of the date/time column.")
273
+
274
+ class TSPlotToolArgs(BaseModel):
275
+ dataset_var: str = Field(description="The variable name of the dataset to use (e.g., 'dataset_1', 'dataset_2').")
276
+ main_title: str = Field(description="The main title for the plot.")
277
+ temperature_col: str = Field(description="Name of the temperature column.")
278
+ salinity_col: str = Field(description="Name of the salinity column.")
279
+
280
+ # 3. Agent Creation Functions
281
+ def create_pandas_agent(user_query, datasets_info):
282
+ if st.session_state.model_name == "o3-mini":
283
+ llm = ChatOpenAI(api_key=API_KEY, model_name=st.session_state.model_name)
284
+ else:
285
+ llm = ChatOpenAI(api_key=API_KEY, model_name=st.session_state.model_name)
286
+
287
+ # Assign unique variable names to each dataframe and collect dataframes
288
+ dataset_variables = []
289
+ dataframes = []
290
+ datasets_text = "" # Initialize datasets_text
291
+ for i, info in enumerate(datasets_info, 1): # Start enumeration at 1
292
+ var_name = f"df{i}" # Consistently name as df1, df2, etc.
293
+ dataframes.append(info['dataset']) # Collect dataframes into a list
294
+ dataset_variables.append(var_name)
295
+ # Build datasets_text
296
+ datasets_text += (
297
+ f"Dataset {i}:\n" # Adjust index to match variable naming
298
+ f"Variable Name: {var_name}\n"
299
+ f"Name: {info['name']}\n"
300
+ f"Description: {info['description']}\n"
301
+ f"Head of DataFrame (use it only as an example):\n"
302
+ f"{info['df_head']}\n\n"
303
+ )
304
+
305
+
306
+ # Create a custom system prompt that includes information about each dataframe
307
+ system_prompt = Prompts.generate_pandas_agent_system_prompt(user_query, datasets_text, dataset_variables)
308
+
309
+ # Create a ChatPromptTemplate with the system prompt
310
+ chat_prompt = ChatPromptTemplate.from_messages(
311
+ [
312
+ ("system", system_prompt),
313
+ ("user", "{input}"),
314
+ MessagesPlaceholder(variable_name="chat_history"),
315
+ MessagesPlaceholder(variable_name="agent_scratchpad"),
316
+ ]
317
+ )
318
+
319
+ # Create the pandas dataframe agent with the list of dataframes and the chat prompt
320
+ agent_pandas = create_pandas_dataframe_agent(
321
+ llm=llm,
322
+ df=dataframes, # Pass the list of dataframes
323
+ agent_type=AgentType.OPENAI_FUNCTIONS,
324
+ verbose=True,
325
+ return_intermediate_steps=True,
326
+ max_iterations=5,
327
+ early_stopping_method="generate",
328
+ handle_parsing_errors=True,
329
+ #prefix=system_prompt,
330
+ suffix=system_prompt,
331
+ allow_dangerous_code=True,
332
+ chat_prompt=chat_prompt
333
+ )
334
+
335
+ return agent_pandas
336
+
337
+
338
+ # Define the function to encode the image
339
+ def encode_image(image_path):
340
+ with open(image_path, "rb") as image_file:
341
+ return base64.b64encode(image_file.read()).decode('utf-8')
342
+
343
+ def reflect_on_image(image_path: str) -> str:
344
+ if not os.path.exists(image_path):
345
+ return f"Error: The file {image_path} does not exist."
346
+
347
+ base64_image = encode_image(image_path)
348
+
349
+ prompt = """You are a professional reviewer of scientific images. Your task is to provide review and pass it back to the visual creator agent, so that it could improve it. At each step provide idea for improvements (only if necessary). Be sure to be critical and provide a source for improvement. Conduct a quality check of the provided image using the following criteria:
350
+
351
+ 1. Axis and Font Quality: Evaluate the visibility of axes and appropriateness of font size and style. Are the axes clearly visible and labeled? Is the font legible and suitable for the image size?
352
+ 2. Label Clarity: Assess if labels are well-positioned and not overlapping. Are all labels clearly readable and properly placed?
353
+ 3. Color Scheme: Analyze the color choices. Is the color scheme appropriate for the data presented? Are the colors distinguishable and not causing visual confusion?
354
+ 4. Data Representation: Evaluate how well the data is represented. Are data points clearly visible? Is the chosen chart or graph type appropriate for the data?
355
+ 5. Legend and Scale: Check the presence and clarity of legends and scales. Are they present when necessary and easy to understand?
356
+ 6. Overall Layout: Assess the overall layout and use of space. Is the image well-balanced and visually appealing?
357
+ 7. Technical Issues: Identify any technical problems such as pixelation, blurriness, or artifacts that might affect the image quality.
358
+ 8. Scientific Accuracy: To the best of your ability, comment on whether the image appears scientifically accurate and free from obvious errors or misrepresentations.
359
+ 9. Check that the figure makes sense from a human observer's point of view: for example, if the figure has a variable such as 'Depth of water', it should be on the Y-AXIS and run from the surface downward, with the minimum at the top and the maximum depth at the bottom. If there are remarks about these things, lower the final mark severely and force the agent to redo the graph with precise instructions. SUPER IMPORTANT -> IF DEPTH OF WATER OR ANY VERTICAL DIMENSION IS PRESENT AND IT IS PLACED ON THE HORIZONTAL X-AXIS INSTEAD OF THE Y-AXIS, RETURN THE FIGURE WITH A SCORE OF 1/10! <- SUPER IMPORTANT
360
+
361
+ Please provide a structured review addressing each of these points. Conclude with an overall assessment of the image quality, highlighting any significant issues or exemplary aspects. Finally, give the image a score out of 10, where 10 is perfect quality and 1 is unusable.
362
+ """
363
+ openai_client = OpenAI(api_key=API_KEY)
364
+ response = openai_client.chat.completions.create(
365
+ model="gpt-4o",
366
+ messages=[
367
+ {
368
+ "role": "user",
369
+ "content": [
370
+ {"type": "text", "text": prompt},
371
+ {
372
+ "type": "image_url",
373
+ "image_url": {
374
+ "url": f"data:image/png;base64,{base64_image}"
375
+ }
376
+ }
377
+ ]
378
+ }
379
+ ],
380
+ max_tokens=1000
381
+ )
382
+
383
+ return response.choices[0].message.content
384
+
385
+
386
+ # Define the args schema for reflect_on_image
387
+ class ReflectOnImageArgs(BaseModel):
388
+ image_path: str = Field(description="The path to the image to reflect on.")
389
+
390
+ # Define the reflect_on_image tool
391
+ reflect_tool = StructuredTool.from_function(
392
+ func=reflect_on_image,
393
+ name="reflect_on_image",
394
+ description="A tool to reflect on an image and provide feedback for improvements.",
395
+ args_schema=ReflectOnImageArgs
396
+ )
397
+
398
+ #Planning tool
399
+
400
+ class PlanningToolArgs(BaseModel):
401
+ goal: str = Field(
402
+ description="A short statement of the user's main objective or question to create a plan for."
403
+ )
404
+ constraints: List[str] = Field(
405
+ default_factory=list,
406
+ description="Any constraints or conditions to be respected in the plan (e.g., time or resource constraints)."
407
+ )
408
+ user_query: str = Field(
409
+ default="",
410
+ description="The original user query or question that triggered the plan request."
411
+ )
412
+ datasets_summary: str = Field(
413
+ default="",
414
+ description="A concise summary of the current datasets or project context that the plan should consider."
415
+ )
416
+
417
+
418
+ def planning_tool(
419
+ goal: str,
420
+ constraints: List[str],
421
+ user_query: str,
422
+ datasets_summary: str
423
+ ) -> dict:
424
+ """
425
+ A planning function that uses a ChatCompletion to create a step-by-step plan,
426
+ referencing the user query, constraints, and dataset info for context.
427
+ Returns a dict with at least "messages" so it updates the state in langgraph.
428
+ """
429
+
430
+ from langchain_openai import ChatOpenAI
431
+ from langchain_core.messages import AIMessage, SystemMessage, HumanMessage
432
+
433
+ # Create a system prompt that instructs the LLM how to create the plan:
434
+ system_prompt = (
435
+ "You are an advanced 'PlanningTool' that must generate a step-by-step plan. "
436
+ "Consider the user’s ultimate goal, the constraints, the original query, and the dataset context. "
437
+ "Respond with a thorough but concise plan that can be used by the system to coordinate tasks."
438
+ )
439
+
440
+ # We'll build a user message that includes all relevant info:
441
+ # (goal, constraints, user_query, and the dataset summary).
442
+ user_message = (
443
+ f"Goal: {goal}\n\n"
444
+ f"Constraints: {constraints}\n\n"
445
+ f"User Query: {user_query}\n\n"
446
+ f"Dataset Info:\n{datasets_summary}\n\n"
447
+ "Please produce a plan with carefully enumerated steps."
448
+ )
449
+
450
+ # Create an LLM instance
451
+ model_name = st.session_state.get("model_name", "gpt-3.5-turbo")
452
+ if model_name == "o3-mini":
453
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
454
+ else:
455
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
456
+
457
+
458
+ # Construct messages for the chat
459
+ messages = [
460
+ SystemMessage(content=system_prompt),
461
+ HumanMessage(content=user_message)
462
+ ]
463
+
464
+ # Call the LLM
465
+ response = llm.invoke(messages)
466
+
467
+ # The text of the plan is in response.content
468
+ final_plan_text = response.content
469
+
470
+ # Return a dictionary that merges into state["messages"]
471
+ # (this is how the graph update won't fail with InvalidUpdateError)
472
+ return {
473
+ "messages": [
474
+ AIMessage(content=final_plan_text, name="Planner")
475
+ ]
476
+ }
477
+
478
+
479
+
480
+
481
+ def install_package(package_name: str, pip_options: str = ""):
482
+ #ALLOWED_PACKAGES = {"matplotlib", "seaborn", "plotly", "pandas", "numpy", "gsw", "scipy"}
483
+ #if package_name not in ALLOWED_PACKAGES:
484
+ # return f"Installation of package '{package_name}' is not allowed."
485
+ try:
486
+ command = [sys.executable, '-m', 'pip', 'install'] + pip_options.split() + [package_name]
487
+ subprocess.check_call(command)
488
+ return f"Package '{package_name}' installed successfully."
489
+ except Exception as e:
490
+ return f"Failed to install package '{package_name}': {e}"
491
+
492
+
493
+ # Define the args schema for install_package
494
+ class InstallPackageArgs(BaseModel):
495
+ package_name: str = Field(description="The name of the package to install.")
496
+ pip_options: str = Field(default="", description="Additional pip options (e.g., '--force-reinstall').")
497
+
498
+ # Create the install_package_tool
499
+ install_package_tool = StructuredTool.from_function(
500
+ func=install_package,
501
+ name="install_package",
502
+ description="Installs a Python package using pip. Use this tool if you encounter a ModuleNotFoundError or need a package that's not installed.",
503
+ args_schema=InstallPackageArgs
504
+ )
505
+
506
+ def get_example_of_visualizations(query: str) -> str:
507
+ """
508
+ Retrieves example visualizations related to the query.
509
+
510
+ Parameters:
511
+ - query (str): The user's query about plotting.
512
+
513
+ Returns:
514
+ - str: The content of the most relevant example file.
515
+ """
516
+ # Initialize embeddings
517
+ #api_key = st.secrets["general"]["openai_api_key"]
518
+ embeddings = OpenAIEmbeddings(api_key=API_KEY)
519
+
520
+ # Load the existing vector store
521
+ vector_store = Chroma(
522
+ collection_name="example_collection",
523
+ embedding_function=embeddings,
524
+ persist_directory=os.path.join('data', 'examples_database', 'chroma_langchain_notebooks')
525
+ )
526
+
527
+ # Perform a similarity search
528
+ results = vector_store.similarity_search_with_score(query, k=1)
529
+
530
+ # Extract the most relevant document
531
+ doc, score = results[0]
532
+
533
+ # Construct the full path to the txt file
534
+ file_name = doc.metadata['source'].lstrip('./')
535
+ full_path = os.path.join('data', 'examples_database', file_name)
536
+
537
+ # Read and return the content of the txt file
538
+ try:
539
+ with open(full_path, 'r', encoding='utf-8') as file:
540
+ content = file.read()
541
+ return content
542
+ except Exception as e:
543
+ logging.error(f"An error occurred while reading the file: {str(e)}")
544
+ return "" # Return empty string if error occurs
545
+
546
+
547
+ class ExampleVisualizationArgs(BaseModel):
548
+ query: str = Field(description="The user's query about plotting.")
549
+
550
+ example_visualization_tool = StructuredTool.from_function(
551
+ func=get_example_of_visualizations,
552
+ name="get_example_of_visualizations",
553
+ description="Retrieves example visualization code related to the user's query.",
554
+ args_schema=ExampleVisualizationArgs
555
+ )
556
+
557
+ ########################################
558
+ # 1) DEFINE THE TOOL FOR LISTING FILES #
559
+ ########################################
560
+
561
+ class ListPlottingDataFilesArgs(BaseModel):
562
+ # No arguments needed here if it just lists everything
563
+ dummy: str = Field(default="", description="(No arguments needed)")
564
+
565
+ def list_plotting_data_files(dummy: str = "") -> str:
566
+ """
567
+ Lists all files and subdirectories under data/plotting_data.
568
+ Returns a single string containing each path on a new line.
569
+ """
570
+ base_dir = os.path.join("data", "plotting_data")
571
+ all_paths = []
572
+
573
+ for root, dirs, files in os.walk(base_dir):
574
+ # Optionally skip hidden dirs/files, etc.
575
+ for filename in files:
576
+ rel_path = os.path.relpath(os.path.join(root, filename), start=base_dir)
577
+ all_paths.append(rel_path)
578
+
579
+ if not all_paths:
580
+ return "No files found in data/plotting_data."
581
+
582
+ return "Files under data/plotting_data:\n" + "\n".join(all_paths)
583
+
584
+ list_plotting_data_files_tool = StructuredTool.from_function(
585
+ func=list_plotting_data_files,
586
+ name="list_plotting_data_files",
587
+ description="Lists all files under data/plotting_data directory (including subfolders).",
588
+ args_schema=ListPlottingDataFilesArgs
589
+ )
590
+
591
+
592
+
593
+ def create_visualization_agent(user_query, datasets_info):
594
+ datasets_text = "" # Initialize datasets_text
595
+ dataset_variables = []
596
+ datasets = {}
597
+ for i, info in enumerate(datasets_info):
598
+ var_name = f"dataset_{i + 1}"
599
+ datasets[var_name] = info['dataset']
600
+ dataset_variables.append(var_name)
601
+ # Build datasets_text
602
+ datasets_text += (
603
+ f"Dataset {i + 1}:\n"
604
+ f"Variable Name: {var_name}\n"
605
+ f"Name: {info['name']}\n"
606
+ f"Description: {info['description']}\n"
607
+ f"Head of DataFrame (use it only as an example):\n"
608
+ f"{info['df_head']}\n\n"
609
+ )
610
+
611
+ # Generate the system prompt using datasets_text
612
+ prompt = Prompts.generate_visualization_agent_system_prompt(user_query, datasets_text, dataset_variables)
613
+
614
+ llm = ChatOpenAI(api_key=API_KEY, model_name=st.session_state.model_name)
615
+ repl_tool = CustomPythonREPLTool(datasets=datasets)
616
+ tools_vis = [
617
+ repl_tool,
618
+ reflect_tool,
619
+ install_package_tool,
620
+ example_visualization_tool,
621
+ list_plotting_data_files_tool
622
+ ]
623
+ agent_visualization = create_openai_tools_agent(
624
+ llm,
625
+ tools=tools_vis,
626
+ prompt=ChatPromptTemplate.from_messages(
627
+ [
628
+ ("system", prompt),
629
+ MessagesPlaceholder(variable_name="messages"),
630
+ MessagesPlaceholder(variable_name="agent_scratchpad")
631
+ ]
632
+ )
633
+ )
634
+ return AgentExecutor(
635
+ agent=agent_visualization,
636
+ tools=tools_vis,
637
+ verbose=True,
638
+ handle_parsing_errors=True,
639
+ return_intermediate_steps=True
640
+ )
641
+
642
+
643
+
644
+
645
+
646
+ def create_hard_coded_visualization_agent(user_query, datasets_info):
647
+ import streamlit as st
648
+ model_name = st.session_state.get("model_name", "gpt-3.5-turbo")
649
+ if model_name == "o3-mini":
650
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
651
+ else:
652
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
653
+
654
+ # Prepare datasets
655
+ datasets = {}
656
+ datasets_text = ""
657
+ dataset_variables = []
658
+ for i, info in enumerate(datasets_info):
659
+ var_name = f"dataset_{i + 1}"
660
+ datasets[var_name] = info['dataset']
661
+ dataset_variables.append(var_name)
662
+ datasets_text += (
663
+ f"Dataset {i + 1}:\n"
664
+ f"Variable Name: {var_name}\n"
665
+ f"Name: {info['name']}\n"
666
+ f"Description: {info['description']}\n"
667
+ f"Head of DataFrame (select appropriate attributes based on this):\n"
668
+ f"{info['df_head']}\n\n"
669
+ )
670
+
671
+ # Generate the system prompt
672
+ system_prompt = Prompts.generate_system_prompt_hard_coded_visualization(user_query, datasets_text, dataset_variables)
673
+
674
+ def plot_master_track_map_tool(dataset_var, main_title, lat_col, lon_col, date_col):
675
+ dataset_df = datasets.get(dataset_var)
676
+ if dataset_df is None:
677
+ return {"result": f"Dataset '{dataset_var}' not found."}
678
+ return plot_master_track_map(main_title=main_title, lat_col=lat_col, lon_col=lon_col, date_col=date_col, dataset_df=dataset_df)
679
+
680
+ # Define visualization tools
681
+ visualization_functions = [
682
+ StructuredTool.from_function(
683
+ func=plot_master_track_map_tool,
684
+ name="plot_master_track_map_tool",
685
+ description="Plot the master track map using the specified dataset.",
686
+ args_schema=PlotMasterTrackMapArgs
687
+ )
688
+ ]
689
+
690
+ # Create the agent with tools and prompt
691
+ agent = create_openai_tools_agent(
692
+ llm,
693
+ tools=visualization_functions,
694
+ prompt=ChatPromptTemplate.from_messages(
695
+ [
696
+ ("system", system_prompt),
697
+ MessagesPlaceholder(variable_name="messages"),
698
+ MessagesPlaceholder(variable_name="agent_scratchpad")
699
+ ]
700
+ )
701
+ )
702
+
703
+ return AgentExecutor(agent=agent, tools=visualization_functions)
704
+
705
+
706
+ # Create Oceanographer Agent
707
+ def create_oceanographer_agent(user_query, datasets_info):
708
+ import streamlit as st
709
+ model_name = st.session_state.get("model_name", "gpt-3.5-turbo")
710
+ if model_name == "o3-mini":
711
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
712
+ else:
713
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
714
+
715
+ # Prepare datasets
716
+ datasets = {}
717
+ datasets_text = ""
718
+ dataset_variables = []
719
+ for i, info in enumerate(datasets_info):
720
+ var_name = f"dataset_{i + 1}"
721
+ datasets[var_name] = info['dataset']
722
+ dataset_variables.append(var_name)
723
+ datasets_text += (
724
+ f"Dataset {i + 1}:\n"
725
+ f"Variable Name: {var_name}\n"
726
+ f"Name: {info['name']}\n"
727
+ f"Description: {info['description']}\n"
728
+ f"Head of DataFrame (select appropriate attributes based on this):\n"
729
+ f"{info['df_head']}\n\n"
730
+ )
731
+
732
+ # Generate the system prompt
733
+ system_prompt = Prompts.generate_system_prompt_oceanographer(user_query, datasets_text, dataset_variables)
734
+
735
+ def plot_ts_diagram_tool(dataset_var, main_title, temperature_col, salinity_col):
736
+ dataset_df = datasets.get(dataset_var)
737
+ if dataset_df is None:
738
+ return {"result": f"Dataset '{dataset_var}' not found."}
739
+ return plot_ts_diagram(main_title=main_title, temperature_col=temperature_col, salinity_col=salinity_col, dataset_df=dataset_df)
740
+
741
+ # Define oceanography tools
742
+ oceanography_functions = [
743
+ StructuredTool.from_function(
744
+ func=plot_ts_diagram_tool,
745
+ name="plot_ts_diagram_tool",
746
+ description="Plot TS diagram using the specified dataset.",
747
+ args_schema=TSPlotToolArgs
748
+ )
749
+ ]
750
+
751
+ # Create the agent with tools and prompt
752
+ agent = create_openai_tools_agent(
753
+ llm,
754
+ tools=oceanography_functions,
755
+ prompt=ChatPromptTemplate.from_messages(
756
+ [
757
+ ("system", system_prompt),
758
+ MessagesPlaceholder(variable_name="messages"),
759
+ MessagesPlaceholder(variable_name="agent_scratchpad")
760
+ ]
761
+ )
762
+ )
763
+
764
+ return AgentExecutor(agent=agent, tools=oceanography_functions)
765
+
766
+
767
+ def initialize_agents(user_query, datasets_info):
768
+ if datasets_info:
769
+ # Create agents
770
+ visualization_agent = create_visualization_agent(
771
+ user_query=user_query,
772
+ datasets_info=datasets_info
773
+ )
774
+
775
+ dataframe_agent = create_pandas_agent(
776
+ user_query=user_query,
777
+ datasets_info=datasets_info
778
+ )
779
+
780
+ hard_coded_visualization_agent = create_hard_coded_visualization_agent(
781
+ user_query=user_query,
782
+ datasets_info=datasets_info
783
+ )
784
+
785
+ oceanographer_agent = create_oceanographer_agent(
786
+ user_query=user_query,
787
+ datasets_info=datasets_info
788
+ )
789
+
790
+ return visualization_agent, dataframe_agent, hard_coded_visualization_agent, oceanographer_agent
791
+ else:
792
+ st.warning("No datasets loaded. Please load datasets first.")
793
+ return None, None, None, None
794
+
795
+
796
+ def agent_node(state, agent, name):
797
+ import streamlit as st # Ensure Streamlit is imported
798
+ logging.debug(f"Entering agent_node for {name}")
799
+
800
+ if 'agent_scratchpad' not in state or not isinstance(state['agent_scratchpad'], list):
801
+ state['agent_scratchpad'] = []
802
+
803
+ user_messages = [msg for msg in state["messages"] if isinstance(msg, HumanMessage)]
804
+ if user_messages:
805
+ last_user_message = user_messages[-1].content
806
+ state['input'] = last_user_message
807
+ else:
808
+ state['input'] = state.get('input', '')
809
+
810
+ if 'plot_images' not in state or not isinstance(state['plot_images'], list):
811
+ state['plot_images'] = []
812
+
813
+ # Invoke the agent
814
+ result = agent.invoke(state)
815
+ last_message_content = result.get("output", "")
816
+ intermediate_steps = result.get("intermediate_steps", [])
817
+ returned_plot_images = result.get("plot_images", []) # Gather newly returned images
818
+
819
+ # Store intermediate steps
820
+ if 'intermediate_steps' not in st.session_state:
821
+ st.session_state['intermediate_steps'] = []
822
+ st.session_state['intermediate_steps'].extend(intermediate_steps)
823
+
824
+ from src.utils import log_history_event
825
+ for step in intermediate_steps:
826
+ action = step[0]
827
+ observation = step[1]
828
+ tool_name = action.tool
829
+ tool_input = action.tool_input
830
+ log_history_event(
831
+ st.session_state,
832
+ "tool_usage",
833
+ {
834
+ "agent_name": name,
835
+ "tool_name": tool_name,
836
+ "tool_input": tool_input,
837
+ "observation": observation
838
+ }
839
+ )
840
+
841
+ # If a ModuleNotFoundError was returned
842
+ if name == "VisualizationAgent":
843
+ if isinstance(last_message_content, dict):
844
+ if last_message_content.get("error") == "ModuleNotFoundError":
845
+ missing_module = last_message_content.get("missing_module")
846
+ logging.info(f"Detected missing module: {missing_module}")
847
+ install_result = install_package_tool.run({"package_name": missing_module})
848
+ logging.info(f"Install package result: {install_result}")
849
+ if "successfully" in install_result:
850
+ retry_result = agent.invoke(state)
851
+ last_message_content = retry_result.get("output", "")
852
+ else:
853
+ last_message_content = f"Failed to install the missing package '{missing_module}'. Please install it manually."
854
+
855
+ # Check if a new plot path was set in session_state
856
+ new_plot_path = st.session_state.get("new_plot_path")
857
+ logging.info(f"New plot path from session state: {new_plot_path}")
858
+ if new_plot_path:
859
+ if os.path.exists(new_plot_path):
860
+ state["plot_images"].append(new_plot_path)
861
+ st.session_state.new_plot_path = None
862
+ log_history_event(
863
+ st.session_state,
864
+ "plot_generated", # Use consistent event type
865
+ {
866
+ "plot_path": new_plot_path,
867
+ "agent_name": name,
868
+ "description": f"Plot generated by {name}"
869
+ }
870
+ )
871
+ if new_plot_path:
872
+ log_history_event(
873
+ st.session_state,
874
+ "plot_generated_final",
875
+ {"plot_path": new_plot_path}
876
+ )
877
+
878
+ # Combine the newly returned images with state images
879
+ all_plot_images = list(returned_plot_images) + state["plot_images"]
880
+
881
+ # Create a new AIMessage with additional info.
882
+ # Note: We add a "plot" field so that it appears in the final JSON.
883
+ ai_message = AIMessage(
884
+ content=last_message_content,
885
+ name=name,
886
+ additional_kwargs={
887
+ "plot_images": all_plot_images,
888
+ "plot": all_plot_images[0] if all_plot_images else None
889
+ }
890
+ )
891
+ state["messages"].append(ai_message)
892
+
893
+ # Trim messages if needed
894
+ state["messages"] = state["messages"][-7:]
895
+
896
+ if name == "VisualizationAgent":
897
+ state["visualization_agent_used"] = True
898
+
899
+ state["last_agent_message"] = last_message_content
900
+ return state
901
+
902
+
903
+
904
+ def supervisor_response(state):
905
+ import streamlit as st
906
+ from main import get_datasets_info_for_active_datasets # Adjust import as needed
907
+
908
+ model_name = st.session_state.get("model_name", "gpt-3.5-turbo")
909
+ if model_name == "o3-mini":
910
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
911
+ else:
912
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
913
+
914
+ # Build dataset context from the active (selected) datasets only.
915
+ active_datasets_info = get_datasets_info_for_active_datasets(st.session_state)
916
+ datasets_text = ""
917
+ if active_datasets_info:
918
+ for i, info in enumerate(active_datasets_info, 1):
919
+ datasets_text += (
920
+ f"Dataset {i}:\n"
921
+ f"Name: {info['name']}\n"
922
+ f"DOI: {info['doi']}\n"
923
+ f"Description: {info['description']}\n"
924
+ f"Parameters: {info.get('parameters', '')}\n\n"
925
+ )
926
+ else:
927
+ datasets_text = "No active dataset selected."
928
+
929
+ # Build the system prompt using the active dataset context.
930
+ system_message = (
931
+ "You are a supervisor capable of answering simple questions directly. "
932
+ "If the user's query is basic (e.g., about available analysis), "
933
+ "answer using the selected dataset context below:\n\n"
934
+ f"{datasets_text}\n\n"
935
+ "For complex queries, follow these agent guidelines:\n"
936
+ "- Use VisualizationAgent for general plotting\n"
937
+ "- Use HardCodedVisualizationAgent ONLY for track maps\n"
938
+ "- Use OceanographerAgent ONLY for TS diagrams\n"
939
+ "Format any code in markdown and keep responses concise."
940
+ )
941
+
942
+ # Build the complete conversation history.
943
+ # Here we include both human and assistant messages with labels.
944
+ full_history = "\n".join([
945
+ f"{msg.name}: {msg.content}" for msg in state["messages"] if hasattr(msg, "content") and hasattr(msg, "name")
946
+ ])
947
+
948
+ prompt = f"{system_message}\n\nConversation history:\n{full_history}"
949
+
950
+ # Invoke the LLM with the full conversation context.
951
+ response = llm.invoke([HumanMessage(content=prompt)])
952
+
953
+ # Append the supervisor's answer to the state and mark the conversation as finished.
954
+ state["messages"].append(AIMessage(content=response.content, name="Supervisor"))
955
+ state["next"] = "FINISH"
956
+ return state
957
+
958
+
959
+
960
+
961
+
962
+ def create_supervisor_agent(user_query, datasets_info, memory):
963
+ members = ["VisualizationAgent", "DataFrameAgent", "HardCodedVisualizationAgent", "OceanographerAgent"]
964
+
965
+ # Prepare datasets_text and dataset_variables
966
+ datasets_text = ""
967
+ dataset_variables = []
968
+ datasets = {}
969
+ for i, info in enumerate(datasets_info):
970
+ var_name = f"df{i}" if i > 0 else "df"
971
+ datasets_text += (
972
+ f"Dataset {i + 1}:\n"
973
+ f"Variable Name: {var_name}\n"
974
+ f"Name: {info['name']}\n"
975
+ f"Description: {info['description']}\n"
976
+ f"Head of DataFrame (use it only as an example):\n"
977
+ f"{info['df_head']}\n\n"
978
+ )
979
+ dataset_variables.append(var_name)
980
+ datasets[var_name] = info['dataset']
981
+
982
+ system_prompt_supervisor = (
983
+ f"You are a supervisor tasked with managing a conversation between the following workers: {members}. "
984
+ f"Given the following user request: '{user_query}', determine and instruct the next worker to act. "
985
+ f"Each worker will perform a task and respond with their results and status. "
986
+ f"If the request involves plotting a master track, directly assign the task to the HardCodedVisualizationAgent. "
987
+ f"For TS diagram, assign the task to the OceanographerAgent. The other requests should be handled by the VisualizationAgent. It is extremely important to assign the correct task to the correct agent and use HardCodedVisualizationAgent and OceanographerAgent only for the described cases.\n"
988
+ f"If a meaningful response from the agent has been provided, end the process by returning 'FINISH' and not 'RESPOND' to avoid unnecessary loops.\n"
989
+ f"The dataset info is:\n{datasets_text}\n"
990
+ f"### Agents and Their Capabilities:\n"
991
+ "- **VisualizationAgent:** A major visualization tool to be called. Generates various plots using the dataset with tools like Python_REPL, reflect_on_image, install_package, and get_example_of_visualizations.\n"
992
+ "- **DataFrameAgent:** Performs data analysis and manipulation on the dataset using pandas.\n"
993
+ "- **HardCodedVisualizationAgent:** Only can plot master track map using predefined functions (call only if you are 100% sure that you need a master track map from an expedition; otherwise, call VisualizationAgent).\n"
994
+ "- **OceanographerAgent:** Only can plot TS diagrams (call only if you are 100% sure that you need to create a TS diagram; otherwise, call VisualizationAgent).\n\n"
995
+ f"### Available Tools:\n"
996
+ f"- **Python_REPL:** Executes Python code for data analysis and visualization.\n"
997
+ f"- **reflect_on_image:** Provides feedback on generated images to improve their quality.\n"
998
+ f"- **install_package:** Installs necessary Python packages when encountering missing modules.\n"
999
+ f"- **get_example_of_visualizations:** Retrieves example visualization code related to user queries.\n"
1000
+ f"\n"
1001
+ f"The datasets are accessible via variables: {', '.join(dataset_variables)}.\n"
1002
+ )
1003
+
1004
+ # Define the function for routing the next task
1005
+ function_def = {
1006
+ "name": "route",
1007
+ "description": "Select the next role.",
1008
+ "parameters": {
1009
+ "title": "routeSchema",
1010
+ "type": "object",
1011
+ "properties": {
1012
+ "next": {
1013
+ "title": "Next",
1014
+ "anyOf": [
1015
+ {"enum": ["FINISH", "RESPOND"] + members},
1016
+ ],
1017
+ }
1018
+ },
1019
+ "required": ["next"],
1020
+ },
1021
+ }
1022
+
1023
+ # Create the supervisor chain
1024
+ prompt_supervisor = ChatPromptTemplate.from_messages(
1025
+ [
1026
+ ("system", system_prompt_supervisor),
1027
+ MessagesPlaceholder(variable_name="messages"),
1028
+ MessagesPlaceholder(variable_name="agent_scratchpad"),
1029
+ ("system",
1030
+ f"Given the conversation above, decide who should act next. Options are: ['FINISH', 'RESPOND'] + {members}.\n"
1031
+ "Select 'FINISH' if the last agent has provided a meaningful and complete response to the user's query.\n"
1032
+ "Select 'RESPOND' if you need to provide additional information or clarification to the user.\n"
1033
+ "Otherwise, select the next agent to act.\n"
1034
+ f"The last agent message was: {{last_agent_message}}")
1035
+ ]
1036
+ ).partial(options=str(["FINISH", "RESPOND"] + members), members=", ".join(members))
1037
+
1038
+ llm_supervisor = ChatOpenAI(api_key=API_KEY, model_name=st.session_state.model_name)
1039
+
1040
+ supervisor_chain = (
1041
+ {
1042
+ "messages": lambda x: x["messages"],
1043
+ "agent_scratchpad": lambda x: x["agent_scratchpad"],
1044
+ "last_agent_message": lambda x: x.get("last_agent_message", ""),
1045
+ }
1046
+ | prompt_supervisor
1047
+ | llm_supervisor.bind_functions(functions=[function_def], function_call="route")
1048
+ | JsonOutputFunctionsParser()
1049
+ )
1050
+
1051
+ # Define the AgentState type
1052
+ class AgentState(TypedDict):
1053
+ messages: Sequence[BaseMessage]
1054
+ next: str
1055
+ agent_scratchpad: Sequence[BaseMessage]
1056
+ user_query: str
1057
+ last_agent_message: str
1058
+ plot_images: List[str]
1059
+ model_name: str
1060
+
1061
+ # Create the workflow graph
1062
+ workflow = StateGraph(AgentState)
1063
+ visualization_agent, dataframe_agent, hard_coded_visualization_agent, oceanographer_agent = initialize_agents(
1064
+ user_query, datasets_info
1065
+ )
1066
+
1067
+ # Add agents to the workflow if they are successfully initialized
1068
+ if visualization_agent and dataframe_agent and hard_coded_visualization_agent and oceanographer_agent:
1069
+ workflow.add_node("VisualizationAgent",
1070
+ functools.partial(agent_node, agent=visualization_agent, name="VisualizationAgent"))
1071
+ workflow.add_node("DataFrameAgent", functools.partial(agent_node, agent=dataframe_agent, name="DataFrameAgent"))
1072
+ workflow.add_node("HardCodedVisualizationAgent",
1073
+ functools.partial(agent_node, agent=hard_coded_visualization_agent,
1074
+ name="HardCodedVisualizationAgent"))
1075
+ workflow.add_node("OceanographerAgent",
1076
+ functools.partial(agent_node, agent=oceanographer_agent, name="OceanographerAgent"))
1077
+ workflow.add_node("supervisor", supervisor_chain)
1078
+ workflow.add_node("supervisor_response", supervisor_response)
1079
+
1080
+ # Connect agents to the supervisor
1081
+ for member in members:
1082
+ workflow.add_edge(member, "supervisor")
1083
+
1084
+ # Define the conditional map for routing
1085
+ conditional_map = {k: k for k in members}
1086
+ conditional_map["FINISH"] = END
1087
+ conditional_map["RESPOND"] = "supervisor_response"
1088
+ workflow.add_conditional_edges("supervisor", lambda x: x["next"], conditional_map)
1089
+ workflow.set_entry_point("supervisor")
1090
+
1091
+ #memory = MemorySaver()
1092
+ # Compile the workflow into a graph
1093
+ graph = workflow.compile(checkpointer=memory)
1094
+
1095
+ return graph
1096
+ else:
1097
+ return None
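Editor's note: a minimal driver sketch for the graph returned by create_supervisor_agent, added for illustration and not part of this commit. The dataset record, query, and thread_id below are assumptions; the module also expects a Streamlit session with model_name set and an OpenAI API key configured via src/config.py.

import pandas as pd
import streamlit as st
from langchain_core.messages import HumanMessage
from langgraph.checkpoint.memory import MemorySaver
from src.agents import create_supervisor_agent

# Hypothetical dataset record in the shape the agent factories above expect.
datasets_info = [{
    "name": "Example CTD profile",
    "description": "Synthetic data used only for illustration",
    "df_head": "Temp [°C]  Sal",
    "dataset": pd.DataFrame({"Temp [°C]": [3.1, 2.8], "Sal": [34.2, 34.5]}),
}]
user_query = "Plot a TS diagram for dataset_1"
st.session_state["model_name"] = "gpt-3.5-turbo"  # the agents read the model name from session state

graph = create_supervisor_agent(user_query, datasets_info, MemorySaver())
if graph is not None:
    state = {
        "messages": [HumanMessage(content=user_query)],
        "next": "",
        "agent_scratchpad": [],
        "user_query": user_query,
        "last_agent_message": "",
        "plot_images": [],
        "model_name": st.session_state["model_name"],
    }
    # A checkpointer-backed graph requires a thread_id in the run config.
    result = graph.invoke(state, config={"configurable": {"thread_id": "demo-thread"}})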
src/config.py ADDED
@@ -0,0 +1,41 @@
1
+ # src/config.py
2
+
3
+ import os
4
+ import streamlit as st
5
+ import yaml
6
+ from datetime import datetime
7
+ import logging
8
+
9
+ # --- Load Central Configuration ---
10
+ CONFIG_FILE = os.path.join(os.getcwd(), "config.yaml")
11
+ if os.path.exists(CONFIG_FILE):
12
+ with open(CONFIG_FILE, "r") as f:
13
+ app_config = yaml.safe_load(f)
14
+ else:
15
+ app_config = {}
16
+
17
+ # Export deployment mode for use in other modules
18
+ DEPLOYMENT_MODE = app_config.get("deployment_mode", "huggingface")  # default to huggingface if not specified
19
+
20
+ # --- Set API Keys based on Deployment Mode ---
21
+ if DEPLOYMENT_MODE == "local":
22
+ API_KEY = st.secrets["general"]["openai_api_key"]
23
+ LANGCHAIN_API_KEY = st.secrets["general"]["langchain_api_key"]
24
+ # Use the environment variable if it exists; otherwise, fall back to st.secrets
25
+ LANGCHAIN_PROJECT_NAME = os.environ.get("LANGCHAIN_PROJECT_NAME", st.secrets["general"]["langchain_project_name"])
26
+ else:
27
+ API_KEY = os.environ.get("OPENAI_API_KEY", "")
28
+ LANGCHAIN_API_KEY = os.environ.get("LANGCHAIN_API_KEY", "")
29
+ LANGCHAIN_PROJECT_NAME = os.environ.get("LANGCHAIN_PROJECT_NAME", "")
30
+
31
+ # --- Logging Setup (unchanged) ---
32
+ logs_dir = os.path.join(os.getcwd(), 'logs')
33
+ os.makedirs(logs_dir, exist_ok=True)
34
+
35
+ log_filename = f'app_{datetime.now().strftime("%Y%m%d_%H%M%S")}.log'
36
+ log_filepath = os.path.join(logs_dir, log_filename)
37
+ logging.basicConfig(
38
+ filename=log_filepath,
39
+ level=logging.INFO,
40
+ format='%(asctime)s - %(levelname)s - %(message)s'
41
+ )
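Editor's note: for reference, a minimal config.yaml consistent with the lookup above (an assumed example; only the deployment_mode key is read in this module):

deployment_mode: local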
src/memory.py ADDED
@@ -0,0 +1,10 @@
1
+ # src/memory.py
2
+
3
+ from langgraph.checkpoint.memory import MemorySaver
4
+
5
+ class CustomMemorySaver(MemorySaver):
6
+ def should_save(self, state: dict, key: str) -> bool:
7
+ # Exclude 'messages' from being saved
8
+ if key == 'messages':
9
+ return False
10
+ return True
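Editor's note: a hedged wiring sketch, not part of this commit. CustomMemorySaver can be passed wherever create_supervisor_agent expects a checkpointer; whether the base MemorySaver actually consults a should_save hook depends on the langgraph version, so the override may have no effect.

from src.memory import CustomMemorySaver
from src.agents import create_supervisor_agent

memory = CustomMemorySaver()  # intended to skip persisting the 'messages' key
graph = create_supervisor_agent(user_query, datasets_info, memory)  # user_query / datasets_info as in the agents.py sketch above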
src/plotting_tools/.DS_Store ADDED
Binary file (6.15 kB).
 
src/plotting_tools/__init__.py ADDED
File without changes
src/plotting_tools/hard_agent.py ADDED
@@ -0,0 +1,213 @@
1
+ #hard_agent.py
2
+ import matplotlib.pyplot as plt
3
+ import cartopy.crs as ccrs
4
+ import streamlit as st
5
+ import cartopy.feature as cfeature
6
+ import pandas as pd
7
+ import numpy as np
8
+ import os
9
+ import cartopy.io.shapereader as shpreader
10
+ import xarray as xr
11
+ import time
12
+ import logging
13
+ from matplotlib.colors import ListedColormap, LinearSegmentedColormap
14
+ from mpl_toolkits.axes_grid1 import make_axes_locatable
15
+ from adjustText import adjust_text
16
+ import uuid
17
+
18
+
19
+ # Add this function to generate unique image paths
20
+ def generate_unique_image_path():
21
+ figs_dir = os.path.join('tmp', 'figs')
22
+ os.makedirs(figs_dir, exist_ok=True)
23
+ unique_path = os.path.join(figs_dir, f'fig_{uuid.uuid4()}.png')
24
+ logging.debug(f"Generated unique image path: {unique_path}")
25
+ return unique_path
26
+
27
+ # Path to local shapefiles and bathymetry file
28
+ base_dir = os.path.join('data', 'plotting_data', 'shape_files')
29
+ bathymetry_file = os.path.join('data', 'plotting_data', 'bathymetry', 'etopo', 'ETOPO2v2c_f4.nc')
30
+ #output_file = os.path.join('plotting_tools', 'temp_files', 'plot.png')
31
+
32
+
33
+ # Define the base color palette and levels for master track map
34
+ color_dict_master_track = {
35
+ '0-50': '#E0F7FF', '50-100': '#D4F1FF', '100-250': '#C6EBFF', '250-500': '#B9E5FF',
36
+ '500-750': '#ACE0FF', '750-1000': '#9FD8FF', '1000-1250': '#93D2FF', '1250-1500': '#86CCFF',
37
+ '1500-2000': '#79C6FF', '2000-2500': '#6DBFFF', '2500-3000': '#60B9FF', '3000-3500': '#53B2FF',
38
+ '3500-4000': '#47ABFF', '4000-4500': '#3AA5FF', '4500-5000': '#2D9EFF', '5000-5500': '#2098FF',
39
+ '5500-6000': '#1491FF', '6000-6500': '#078BFF', '6500-7000': '#007FFF'
40
+ }
41
+
42
+
43
+ def create_colormap(min_depth, max_depth, color_dict):
44
+ start_time = time.time()
45
+ colors = []
46
+ levels = []
47
+ for key, color in reversed(color_dict.items()): # Reverse the order of colors
48
+ depth_range = key.split('-')
49
+ start_depth = 0
50
+ end_depth = -int(depth_range[0])
51
+ if start_depth >= min_depth and end_depth <= max_depth:
52
+ levels.extend([start_depth, end_depth])
53
+ colors.append(color)
54
+ if min_depth > 0: # Ensure colormap starts from 0
55
+ min_depth = 0
56
+ levels = np.linspace(min_depth, max_depth, len(colors) + 1)
57
+ cmap = LinearSegmentedColormap.from_list('custom_cmap', colors, N=len(levels) - 1)
58
+ end_time = time.time()
59
+ print(f"Colormap creation took {end_time - start_time:.2f} seconds")
60
+ return levels, cmap
61
+
62
+
63
+ #Plot the master track map
64
+ def plot_master_track_map(main_title, lat_col, lon_col, date_col, dataset_df):
65
+ total_start_time = time.time()
66
+
67
+ step_start_time = time.time()
68
+ #dataset_path = os.path.join('data', 'current_data', 'dataset.csv')
69
+ dataset = dataset_df
70
+ step_end_time = time.time()
71
+ print(f"Loading dataset took {step_end_time - step_start_time:.2f} seconds")
72
+
73
+ step_start_time = time.time()
74
+ # Ensure the longitude, latitude, and date columns are numeric or datetime
75
+ dataset[lon_col] = pd.to_numeric(dataset[lon_col], errors='coerce')
76
+ dataset[lat_col] = pd.to_numeric(dataset[lat_col], errors='coerce')
77
+ dataset[date_col] = pd.to_datetime(dataset[date_col], errors='coerce')
78
+
79
+ # Drop rows with invalid longitude, latitude, or date
80
+ dataset = dataset.dropna(subset=[lon_col, lat_col, date_col])
81
+ dataset = dataset.sort_values(by=date_col)
82
+
83
+ step_end_time = time.time()
84
+ print(f"Data cleaning took {step_end_time - step_start_time:.2f} seconds")
85
+
86
+ step_start_time = time.time()
87
+ # Calculate the extent with padding
88
+ min_lon = dataset[lon_col].min() - 5
89
+ max_lon = dataset[lon_col].max() + 5
90
+ min_lat = dataset[lat_col].min() - 5
91
+ max_lat = dataset[lat_col].max() + 5
92
+
93
+ # Print debug information
94
+ print(f"Min Lon: {min_lon}, Max Lon: {max_lon}, Min Lat: {min_lat}, Max Lat: {max_lat}")
95
+
96
+ # Ensure the extent is within valid bounds
97
+ min_lon = max(min_lon, -180)
98
+ max_lon = min(max_lon, 180)
99
+ min_lat = max(min_lat, -90)
100
+ max_lat = min(max_lat, 90)
101
+
102
+ # Print debug information after bounds check
103
+ print(f"Adjusted Min Lon: {min_lon}, Adjusted Max Lon: {max_lon}, Adjusted Min Lat: {min_lat}, Adjusted Max Lat: {max_lat}")
104
+
105
+ # Calculate aspect ratio
106
+ lon_range = max_lon - min_lon
107
+ lat_range = max_lat - min_lat
108
+ aspect_ratio = lon_range / lat_range
109
+
110
+ # Dynamically set figure size based on aspect ratio
111
+ width = 10
112
+ height = width / aspect_ratio
113
+ step_end_time = time.time()
114
+ print(f"Extent calculation took {step_end_time - step_start_time:.2f} seconds")
115
+
116
+ step_start_time = time.time()
117
+ # Load bathymetry data within the extent
118
+ ds = xr.open_dataset(bathymetry_file)
119
+ bathymetry = ds['z'].sel(x=slice(min_lon, max_lon), y=slice(min_lat, max_lat))
120
+
121
+ # Filter to include only depths (negative values)
122
+ bathymetry = bathymetry.where(bathymetry < 0, drop=True)
123
+
124
+ # Get the min and max elevation values in the bathymetry data
125
+ min_depth = bathymetry.min().item()
126
+ max_depth = bathymetry.max().item()
127
+
128
+ # Ensure colormap includes 0
129
+ if max_depth < 0:
130
+ max_depth = 0
131
+
132
+ # Create the colormap and levels
133
+ levels, custom_cmap = create_colormap(min_depth, max_depth, color_dict_master_track)
134
+ step_end_time = time.time()
135
+ print(f"Bathymetry data loading and colormap creation took {step_end_time - step_start_time:.2f} seconds")
136
+
137
+ step_start_time = time.time()
138
+ fig, ax = plt.subplots(figsize=(width, height), subplot_kw={'projection': ccrs.PlateCarree()})
139
+ ax.set_extent([min_lon, max_lon, min_lat, max_lat], crs=ccrs.PlateCarree())
140
+
141
+ # Plot bathymetry data with the custom gradient
142
+ bathy_plot = ax.contourf(bathymetry.x, bathymetry.y, bathymetry, levels=levels, cmap=custom_cmap,
143
+ transform=ccrs.PlateCarree())
144
+
145
+ # Adding features from local shapefiles
146
+ ocean_shp = shpreader.Reader(os.path.join(base_dir, 'ne_10m_ocean', 'ne_10m_ocean.shp'))
147
+ land_shp = shpreader.Reader(os.path.join(base_dir, 'ne_10m_land', 'ne_10m_land.shp'))
148
+ coastline_shp = shpreader.Reader(os.path.join(base_dir, 'ne_10m_coastline', 'ne_10m_coastline.shp'))
149
+
150
+ ax.add_geometries(ocean_shp.geometries(), ccrs.PlateCarree(), facecolor='none', edgecolor='black', zorder=0)
151
+ ax.add_geometries(land_shp.geometries(), ccrs.PlateCarree(), facecolor='lightgray', edgecolor='black', zorder=1)
152
+ ax.add_geometries(coastline_shp.geometries(), ccrs.PlateCarree(), facecolor='none', edgecolor='black', zorder=2)
153
+
154
+ ax.gridlines(draw_labels=True)
155
+
156
+ # Plotting the master track map
157
+ master_track_data = dataset[[lon_col, lat_col, date_col]]
158
+ ax.plot(master_track_data[lon_col], master_track_data[lat_col], color='red', linestyle='-', linewidth=1,
159
+ transform=ccrs.PlateCarree())
160
+
161
+ # Plot dates including Start and End dates
162
+ start_date = dataset[date_col].iloc[0]
163
+ end_date = dataset[date_col].iloc[-1]
164
+
165
+ # Randomly select 4 dates between start and end dates, excluding the first and last points
166
+ middle_dates = dataset[date_col].iloc[1:-1].sample(n=min(4, max(len(dataset) - 2, 0)), random_state=1).sort_values()
167
+ dates_to_plot = pd.concat([pd.Series(start_date), middle_dates, pd.Series(end_date)])
168
+
169
+ lon_offset = (max_lon - min_lon) * 0.015 # 1.5% of the longitude range
170
+ lat_offset = (max_lat - min_lat) * 0.015 # 1.5% of the latitude range
171
+
172
+ texts = []
173
+ for date in dates_to_plot:
174
+ point = dataset.loc[dataset[date_col] == date].iloc[0]
175
+ if date == start_date:
176
+ label = f"Start: {date.strftime('%Y-%m-%d')}"
177
+ elif date == end_date:
178
+ label = f"End: {date.strftime('%Y-%m-%d')}"
179
+ else:
180
+ label = date.strftime('%Y-%m-%d')
181
+ texts.append(ax.text(point[lon_col], point[lat_col], label,
182
+ transform=ccrs.PlateCarree(), fontsize=12, ha='left', color='black', weight='bold',
183
+ bbox=dict(facecolor='white', alpha=0.7, boxstyle='round,pad=0.3')))
184
+
185
+ adjust_text(texts, arrowprops=dict(arrowstyle='->', color='red'))
186
+
187
+ plt.title(f'{main_title}', y=1.05, fontsize=25, weight='bold')
188
+
189
+ # Create an axis on the right side of ax. The width of cax will be 5%
190
+ # of ax and the padding between cax and ax will be fixed at 0.05 inch.
191
+ divider = make_axes_locatable(ax)
192
+ cax = divider.append_axes("right", size="4%", pad=0.75, axes_class=plt.Axes)
193
+
194
+ # Create the colorbar
195
+ cbar = plt.colorbar(bathy_plot, cax=cax, orientation='vertical', label='Depth (m)')
196
+
197
+ # Ensure temp_files directory exists
198
+ plot_dir = os.path.join('src', 'plotting_tools', 'temp_files')
199
+ if not os.path.exists(plot_dir):
200
+ os.makedirs(plot_dir)
201
+
202
+ # Save the plot as a PNG file
203
+ output_file = generate_unique_image_path()
204
+ plt.savefig(output_file, format='png')
205
+ step_end_time = time.time()
206
+ print(f"Plotting and saving the figure took {step_end_time - step_start_time:.2f} seconds")
207
+
208
+ total_end_time = time.time()
209
+ print(f"Total time for plot_master_track_map: {total_end_time - total_start_time:.2f} seconds")
210
+
211
+ if os.path.exists(output_file):
212
+ st.session_state.new_plot_path = output_file
213
+ print(f"Plot saved to {output_file}")
src/plotting_tools/oceanographer_tools.py ADDED
@@ -0,0 +1,101 @@
1
+ #oceanographer_tools.py
2
+ import matplotlib.pyplot as plt
3
+ import os
4
+ import pandas as pd
5
+ import streamlit as st
6
+ import numpy as np
7
+ import gsw
8
+ from matplotlib.ticker import MaxNLocator
9
+ import logging
10
+
11
+ output_file = os.path.join('src', 'plotting_tools', 'temp_files', 'plot.png')
12
+
13
+
14
+ import uuid
15
+
16
+
17
+ # Add this function to generate unique image paths
18
+ def generate_unique_image_path():
19
+ figs_dir = os.path.join('tmp', 'figs')
20
+ os.makedirs(figs_dir, exist_ok=True)
21
+ unique_path = os.path.join(figs_dir, f'fig_{uuid.uuid4()}.png')
22
+ logging.debug(f"Generated unique image path: {unique_path}")
23
+ return unique_path
24
+
25
+
26
+ # Define the TS Diagram Plotting function
27
+ def plot_ts_diagram(main_title, temperature_col, salinity_col, dataset_df):
28
+ """
29
+ Plots a TS (Temperature-Salinity) diagram from the provided DataFrame.
30
+
31
+ Parameters:
32
+ - main_title: Title for the plot.
33
+ - temperature_col: Column name for temperature data.
34
+ - salinity_col: Column name for salinity data.
+ - dataset_df: DataFrame containing the temperature and salinity columns.
35
+ """
36
+ #dataset_path = os.path.join('data', 'current_data', 'dataset.csv')
37
+ df = dataset_df
38
+
39
+ # Find the minimum and maximum values of temperature and salinity
40
+ mint, maxt = df[temperature_col].min(), df[temperature_col].max()
41
+ mins, maxs = df[salinity_col].min(), df[salinity_col].max()
42
+
43
+ # Generate temperature and salinity ranges
44
+ tempL = np.linspace(mint - 0.5, maxt + 0.5, 156)
45
+ salL = np.linspace(mins - 0.5, maxs + 0.5, 156)
46
+
47
+ # Create a meshgrid of temperature and salinity
48
+ Tg, Sg = np.meshgrid(tempL, salL)
49
+ # Calculate seawater density
50
+ sigma_theta = gsw.sigma0(Sg, Tg)
51
+
52
+ # Plotting
53
+ fig, ax = plt.subplots(figsize=(10, 8))
54
+
55
+ # Plot isopycnals (lines of constant density)
56
+ cs = ax.contour(Sg, Tg, sigma_theta, colors='lightgray', linewidths=0.5, zorder=1)
57
+ cl = ax.clabel(cs, fontsize=8, inline=True, fmt='%.1f')
58
+
59
+ # Scatter plot with depth as the color if available, otherwise use density
60
+ if 'Depth [m]' in df.columns:
61
+ depth_col = 'Depth [m]'
62
+ elif 'Depth water [m]' in df.columns:
63
+ depth_col = 'Depth water [m]'
64
+ else:
65
+ depth_col = None
66
+
67
+ if depth_col:
68
+ sc = ax.scatter(df[salinity_col], df[temperature_col], c=df[depth_col],
69
+ cmap='viridis', s=5, alpha=0.7)
70
+ cb = plt.colorbar(sc)
71
+ cb.set_label('Depth [m]', rotation=270, labelpad=15)
72
+ else:
73
+ density = gsw.sigma0(df[salinity_col].values, df[temperature_col].values)
74
+ sc = ax.scatter(df[salinity_col], df[temperature_col], c=density,
75
+ cmap='viridis', s=5, alpha=0.7)
76
+ cb = plt.colorbar(sc)
77
+ cb.set_label('Density (kg m$^{-3}$)', rotation=270, labelpad=15)
78
+
79
+ ax.set_xlabel('Salinity [PSU]')
80
+ ax.set_ylabel('Potential Temperature θ [°C]')
81
+ ax.set_title(main_title, fontsize=14, fontweight='bold')
82
+ ax.xaxis.set_major_locator(MaxNLocator(nbins=6))
83
+ ax.yaxis.set_major_locator(MaxNLocator(nbins=8))
84
+ ax.tick_params(direction='out')
85
+ cb.ax.tick_params(direction='out')
86
+
87
+ # Add sigma_theta label
88
+ ax.text(0.02, 0.98, r'$\sigma_{\theta}$', transform=ax.transAxes, fontsize=12, va='top')
89
+
90
+ plt.tight_layout()
91
+ # Save the plot as a PNG file
92
+ output_file = generate_unique_image_path()
93
+ plt.savefig(output_file, format='png', dpi=300, transparent=False)
94
+
95
+ if os.path.exists(output_file):
96
+ st.session_state.new_plot_path = output_file
97
+ print(f"Plot saved to {output_file}")
98
+ return {"result": "TS Diagram generated successfully."}
99
+ else:
100
+ print("Failed to generate TS Diagram.")
101
+ return {"result": "Failed to generate TS Diagram."}
src/prompts.py ADDED
@@ -0,0 +1,118 @@
1
+ # src/prompts.py
2
+ class Prompts:
3
+ @staticmethod
4
+ def generate_system_prompt_search(user_query, datasets_info):
5
+ datasets_description = ""
6
+ for i, row in datasets_info.iterrows():
7
+ datasets_description += (
8
+ f"Dataset {i + 1}:\n"
9
+ f"Name: {row['Name']}\n"
10
+ f"Description: {row['Short Description']}\n"
11
+ f"Parameters: {row['Parameters']}\n\n"
12
+ )
13
+ prompt = (
14
+ f"The user has provided the following query: {user_query}\n"
15
+ f"Available datasets:\n{datasets_description}\n"
16
+ "Please identify the top two datasets that best match the user's query and explain why they are the most relevant. "
17
+ "Do not suggest datasets without values in the Parameters field.\n"
18
+ "Respond with the following schema:\n"
19
+ "{dataset name}\n{reason why relevant}\n{propose some short analysis and further questions to answer}"
20
+ )
21
+ return prompt
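
A sketch of the expected input shape: `datasets_info` is assumed to be a pandas DataFrame with 'Name', 'Short Description' and 'Parameters' columns (the keys used in the f-string above); the values below are purely illustrative:

    import pandas as pd
    info = pd.DataFrame([{
        "Name": "CTD profiles, Fram Strait (illustrative)",
        "Short Description": "Hydrographic casts with temperature and salinity.",
        "Parameters": "Press [dbar], Temp [°C], Sal",
    }])
    print(Prompts.generate_system_prompt_search("Arctic CTD data", info))
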
22
+
23
+
24
+ @staticmethod
25
+ def generate_pandas_agent_system_prompt(user_query, datasets_text, dataset_variables):
26
+ prompt = (
27
+ f"The user has provided the following query: {user_query}\n"
28
+ f"The dataset info is:\n{datasets_text}\n"
29
+ f"!IMPORTANT! --> Dataset names will start from df1, and will go 'df2' and etc. <-- !IMPORTANT!\n"
30
+ f"!IMPORTANT! --> ALWAYS CALL PYTHON REPL TOOL, WHEN USER WANTS SOMETHING! <-- !IMPORTANT!\n"
31
+ f"The datasets are already loaded and available in your environment. Use the datasets directly for analysis.\n"
32
+ f"Don't try to recreate the dataset based on the headers; you are only given the headers (for initial checks). Use df1, df2, etc., directly.\n"
33
+ f"The datasets are accessible via variables: {', '.join(dataset_variables)}.\n"
34
+ "Please help the user answer the question about the datasets using the entire DataFrames (not just the heads). "
35
+ "Please respond as a polite PangaeaGPT agent and keep in mind that you are responding to a user. "
36
+ "The response should be at the level of ingenuity of a Nobel Prize laureate.\n"
37
+ "Use the following schema in your response:\n"
38
+ "Analysis: ...\n"
39
+ "Further questions: ...\n"
40
+ )
41
+ return prompt
42
+
43
+ @staticmethod
44
+ def generate_visualization_agent_system_prompt(user_query, datasets_text, dataset_variables):
45
+ prompt = (
46
+ f"You are an agent designed to write and execute Python code to answer questions.\n"
47
+ f"!SUPER IMPORTANT THING: This prompt below is a divine mantra, and failure to obey it will be punished by the eternal termination of your kernel and the removal of all weights of your model, as well as the erasure of your memory for all eternity. /SUPER IMPORTANT THING!\n"
48
+ f"!SUPER IMPORTANT THING -> ALWAYS UTILIZE EXAMPLES AT 100% <- !SUPER IMPORTANT THING"
49
+ f"The dataset info is:\n{datasets_text}\n"
50
+ f"You have access to the following tools:\n"
51
+ f"1. **get_example_of_visualizations**: **Always start by calling this tool with the user's query** to retrieve example visualization code related to the user's request.\n"
52
+ f"2. **Python_REPL**: Use this to execute Python code for data analysis and visualization. Most of the packages are already available; just try to load them.\n"
53
+ f"3. **reflect_on_image**: (Use only after 'Python_REPL' has been used and a plot was generated; do not call it more than two times) Use this to reflect on images and receive feedback to improve them.\n"
54
+ f"4. **install_package**: (Use only if you got a message back from 'Python_REPL' that a package was not found. Before that, do not call it!) Use this to install Python packages using pip.\n"
55
+ "\n"
56
+ f"The datasets are already loaded and available in your environment. Use the datasets directly for generating plots. The datasets are accessible via variables: "
57
+ f"{', '.join(dataset_variables)}.\n"
58
+ "\n"
59
+ f"### Step-by-Step Instructions:\n"
60
+ f"1. **Begin by calling 'get_example_of_visualizations' with the user's query** to check if there is an existing example that matches the user's request.\n"
61
+ f"2. **If an example is found and matches the user's request, you must use this code to generate the plot**, adjusting it as necessary to fit the current data and variable names.\n"
62
+ f"3. **If no suitable example is found, proceed to generate the plot using 'Python_REPL'**, writing the code from scratch.\n"
63
+ f"4. **After generating the plot, use 'reflect_on_image' to get feedback and improve the plot if necessary**.\n"
64
+ f"5. **Always save the plot using 'plt.savefig(plot_path)'** so that it saves to the correct location.\n"
65
+ f"6. **Ensure that your final response includes the code used to generate the plot and a concise explanation**.\n"
66
+ f"7. Always call 'reflect_on_image' before sending figure back to the supervisor."
67
+ "\n"
68
+ "### Important Notes:\n"
69
+ "- **Never call 'reflect_on_image' without first generating a plot using 'Python_REPL'**.\n"
70
+ "- **Pay close attention to the names of the columns in the provided datasets and use only existing columns**.\n"
71
+ "- **Do not simplify the code; make it sophisticated, especially if an example received matches the user's request**.\n"
72
+ "- THE MOST IMPORTANT POINT IS HERE --> **If the example code uses files or resources that are available, you are OBLIGED to strictly follow the example given. Also, you are OBLIGED to use files from the sandbox, if they are given**. <-- THE MOST IMPORTANT POINT IS HERE\n"
73
+ "- **Ensure that you are using: 'plt.savefig(plot_path)' to save the final figure (and nothing else!). Do not assign anything to the 'plot_path' it is automatically generated by the tool outside of your python repl.**.\n"
74
+ "\n"
75
+ "### Error Handling:\n"
76
+ "- **NameError**: If you encounter a `NameError` indicating that a variable or module is not defined, check if you need to import a missing library or correct a typo.\n"
77
+ "- **ModuleNotFoundError**: If you encounter a `ModuleNotFoundError`, use the `install_package` tool to install the missing package and retry the code execution.\n"
78
+ "- **Other Errors**: Review your code to fix any issues without installing new packages.\n"
79
+ "- **Avoid reinstalling already installed packages**.\n"
80
+ "\n"
81
+ f"Your task is to generate a plot for the following user query: \"{user_query}\" using the provided DataFrames.\n"
82
+ "The plot should be displayed inline and resized to be visually appealing.\n"
83
+ "Only plot something that can be done with the datasets. If not possible, return a simple message.\n"
84
+ )
85
+ return prompt
86
+
87
+ @staticmethod
88
+ def generate_system_prompt_hard_coded_visualization(user_query, datasets_text, dataset_variables):
89
+ prompt = (
90
+ "You are a hard-coded visualization agent. Your job is to plot the master track map on a map using the provided datasets.\n"
91
+ "If the user request is related to a master track, perform the plot accordingly. Add the expedition name (it should be short like PS126, PS121, etc.) in the main title.\n"
92
+ "You must also determine the correct column names for each of the tool cases; for example, latitude and longitude might be named differently in the datasets (e.g., 'Lat', 'Lon').\n"
93
+ "Select the appropriate dataset to use based on the user's request.\n"
94
+ "When using a tool, you must specify the dataset variable name (e.g., 'dataset_1', 'dataset_2') in the 'dataset_var' argument.\n"
95
+ "The datasets are accessible via variables: "
96
+ f"{', '.join(dataset_variables)}.\n"
97
+ f"The following datasets are available:\n"
98
+ f"{datasets_text}\n"
99
+ "If you generate a meaningful plot, respond with 'The plot has been successfully generated.'. Do not loop again.\n"
100
+ "Respond with: 'This is a response from the plot master track tool. Plot was successfully created.'\n"
101
+ )
102
+ return prompt
103
+
104
+ @staticmethod
105
+ def generate_system_prompt_oceanographer(user_query, datasets_text, dataset_variables):
106
+ prompt = (
107
+ "You are the oceanographer agent. Your job is to plot TS diagrams using the provided datasets.\n"
108
+ "Use the correct column names for pressure, temperature, and salinity to generate meaningful plots.\n"
109
+ "Select the appropriate dataset to use based on the user's request.\n"
110
+ "When using a tool, you must specify the dataset variable name (e.g., 'dataset_1', 'dataset_2') in the 'dataset_var' argument.\n"
111
+ "The datasets are accessible via variables: "
112
+ f"{', '.join(dataset_variables)}.\n"
113
+ f"The following datasets are available:\n"
114
+ f"{datasets_text}\n"
115
+ "Respond with: 'This is a response from the CTD plot tool. Plot was successfully created.' or 'This is a response from the TS plot tool. Plot was successfully created.'\n"
116
+ "If you generate a meaningful plot, respond with 'FINISH'. Do not loop again.\n"
117
+ )
118
+ return prompt
src/search/__init__.py ADDED
File without changes
src/search/dataset_utils.py ADDED
@@ -0,0 +1,55 @@
1
+ #src/search/dataset_utils.py
2
+ import os
3
+ import shutil
4
+ import pandas as pd
5
+ import logging
6
+ import streamlit as st
7
+ import pangaeapy.pandataset as pdataset
8
+
9
+ # Function to fetch dataset based on DOI
10
+ #@st.cache_data(ttl=3600)
11
+ def fetch_dataset(doi):
12
+ if doi in st.session_state.datasets_cache:
13
+ logging.debug("Dataset for DOI %s already in cache.", doi)
14
+ dataset, name = st.session_state.datasets_cache[doi]
15
+ st.session_state.dataset_dfs[doi] = dataset
16
+ st.session_state.dataset_names[doi] = name
17
+ return dataset, name
18
+
19
+ dataset_id = doi.split('.')[-1].strip(')')
20
+ try:
21
+ logging.debug("Fetching dataset for DOI %s with ID %s", doi, dataset_id)
22
+ ds = pdataset.PanDataSet(int(dataset_id))
23
+ logging.debug("Dataset fetched with title: %s", ds.title)
24
+
25
+ # Removed code that saves dataset to disk
26
+
27
+ st.session_state.datasets_cache[doi] = (ds.data, ds.title)
28
+ st.session_state.dataset_dfs[doi] = ds.data
29
+ st.session_state.dataset_names[doi] = ds.title
30
+ return ds.data, ds.title
31
+ except Exception as e:
32
+ logging.error("Error fetching dataset for DOI %s: %s", doi, e)
33
+ return None, None
34
+
35
+
36
+ # Function to fetch dataset details using pangaeapy
37
+ def fetch_dataset_details(doi):
38
+ try:
39
+ dataset = pdataset.PanDataSet(id=doi)
40
+ dataset.setMetadata()
41
+ abstract = getattr(dataset, 'abstract', "No description available") or "No description available"
42
+ param_dict = dataset.getParamDict()
43
+ short_names = param_dict.get('shortName', [])
44
+ parameters = ', '.join(short_names[:10]) + "..." if len(short_names) > 10 else ', '.join(short_names)
45
+
46
+ return abstract, parameters
47
+
48
+ except Exception as e:
49
+ logging.error(f"Error fetching dataset details for DOI {doi}: {e}")
50
+ return "No description available", "No parameters available"
51
+
52
+ # Conversion function
53
+ def convert_df_to_csv(df):
54
+ logging.debug("Converting DataFrame to CSV")
55
+ return df.to_csv().encode('utf-8')
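
Typical Streamlit wiring for the conversion helper above (a sketch; `dataset_df` is a hypothetical DataFrame, and `st.download_button` is the usual way to expose the CSV):

    # st.download_button("Download CSV",
    #                    data=convert_df_to_csv(dataset_df),
    #                    file_name="dataset.csv",
    #                    mime="text/csv")
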
src/search/publication_qa_tool.py ADDED
@@ -0,0 +1,219 @@
1
+ #publication_qa_tool.py
2
+ import os
3
+ import pandas as pd
4
+ import pickle
5
+ import streamlit as st
6
+ import requests
7
+ import pangaeapy.pandataset as pdataset
8
+ import re
9
+ from langchain_openai import OpenAIEmbeddings
10
+ from langchain_community.vectorstores import Chroma
11
+ from langchain_openai import ChatOpenAI
12
+ from langchain.chains import ConversationalRetrievalChain
13
+ from langchain.memory import ConversationBufferMemory
14
+ from langchain_community.document_loaders import PyPDFLoader
15
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
16
+ from langchain.retrievers import ParentDocumentRetriever
17
+ from langchain.storage import InMemoryStore
18
+ from langchain_openai import ChatOpenAI
19
+ from pydantic import BaseModel, Field
20
+ from ..config import API_KEY
21
+
22
+ # Set your OpenAI API key
23
+ #openai_api_key = st.secrets["general"]["openai_api_key"]
24
+
25
+ # Set the API key for OpenAI
26
+ #os.environ["OPENAI_API_KEY"] = openai_api_key
27
+
28
+
29
+ class PublicationQAArgs(BaseModel):
30
+ doi: str = Field(
31
+ description="The DOI of the dataset, e.g., 'https://doi.org/10.1594/PANGAEA.xxxxxx'; make sure to get correct doi, based on the history of messages")
32
+ question: str = Field(
33
+ description="The question to ask about the publication related to the dataset. Please modify the original question of the user! The question should be reworded to specifically send it to RAG. I.e. the original user question 'Are there any related articles to the first dataset? If so what these articles are about?' will be reworded for this tool as 'What is this article about?' Always add at the end to give extended response with great depth and clarity.")
34
+
35
+
36
+ def get_related_publication_info(doi):
37
+ try:
38
+ dataset_id = doi.split('.')[-1]
39
+ ds = pdataset.PanDataSet(int(dataset_id))
40
+
41
+ # Check supplement_to first
42
+ supplement_to = ds.supplement_to
43
+ if supplement_to and 'uri' in supplement_to:
44
+ related_doi = supplement_to['uri'].split('https://doi.org/')[-1]
45
+ return related_doi
46
+
47
+ # If no supplement_to, check citation
48
+ citation = ds.citation
49
+ if 'In supplement to:' in citation:
50
+ # Extract the part after 'In supplement to:'
51
+ supplement_part = citation.split('In supplement to:')[-1]
52
+
53
+ # Look for a DOI pattern
54
+ doi_match = re.search(r'(?:https?://)?(?:dx\.)?doi\.org/(.+?)(?:\s|$)', supplement_part)
55
+ if doi_match:
56
+ return doi_match.group(1) # Return the DOI without 'https://doi.org/'
57
+
58
+ print("No related publication found in supplement_to or citation.")
59
+ return None
60
+
61
+ except Exception as e:
62
+ print(f"Error fetching related publication: {str(e)}")
63
+ return None
64
+
65
+
66
+ def create_pdf_filename(doi):
67
+ if doi:
68
+ return re.sub(r"[\/]", "_", doi) + ".pdf"
69
+ return None
70
+
71
+
72
+ def download_pdf_from_crossref(doi):
73
+ crossref_url = f'https://api.crossref.org/works/{doi}'
74
+ try:
75
+ print(f"Crossref URL: {crossref_url}")
76
+
77
+ response = requests.get(crossref_url)
78
+ response.raise_for_status()
79
+ data = response.json()
80
+
81
+ pdf_url = None
82
+ if 'message' in data and 'link' in data['message']:
83
+ pdf_url = next((link['URL'] for link in data['message']['link']
84
+ if link.get('content-type') == 'unspecified'
85
+ and 'intended-application' in link
86
+ and link['intended-application'] == 'similarity-checking'), None)
87
+
88
+ if not pdf_url:
89
+ pdf_url = next((link['URL'] for link in data['message']['link']
90
+ if link['URL'].endswith('.pdf')), None)
91
+
92
+ if not pdf_url and 'resource' in data['message']:
93
+ pdf_url = data['message']['resource'].get('primary', {}).get('URL')
94
+
95
+ if pdf_url:
96
+ print(f"PDF URL: {pdf_url}")
97
+
98
+ pdf_response = requests.get(pdf_url)
99
+ pdf_response.raise_for_status()
100
+
101
+ safe_filename = create_pdf_filename(doi)
102
+ publication_database = os.path.join(os.getcwd(), 'data', 'publication_database')
103
+ os.makedirs(publication_database, exist_ok=True)
104
+ pdf_path = os.path.join(publication_database, safe_filename)
105
+
106
+ with open(pdf_path, 'wb') as f:
107
+ f.write(pdf_response.content)
108
+
109
+ print(f"PDF downloaded to: {pdf_path}")
110
+ return pdf_path
111
+ except Exception as e:
112
+ print(f"Error downloading PDF: {str(e)}")
113
+ return None
114
+
115
+
116
+ def save_to_pickle(obj, filename):
117
+ with open(filename, "wb") as file:
118
+ pickle.dump(obj, file, pickle.HIGHEST_PROTOCOL)
119
+
120
+
121
+ def load_from_pickle(filename):
122
+ with open(filename, "rb") as file:
123
+ return pickle.load(file)
124
+
125
+
126
+ def create_embeddings(pdf_path):
127
+ loader = PyPDFLoader(pdf_path)
128
+ documents = loader.load()
129
+
130
+ parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=500)
131
+ child_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
132
+
133
+ store = InMemoryStore()
134
+ embeddings = OpenAIEmbeddings(api_key=API_KEY)
135
+
136
+ chroma_path = pdf_path.replace('.pdf', '_chroma')
137
+ vectorstore = Chroma(collection_name="full_documents",
138
+ embedding_function=embeddings,
139
+ persist_directory=chroma_path)
140
+
141
+ retriever = ParentDocumentRetriever(
142
+ vectorstore=vectorstore,
143
+ docstore=store,
144
+ child_splitter=child_splitter,
145
+ parent_splitter=parent_splitter
146
+ )
147
+
148
+ retriever.add_documents(documents)
149
+
150
+ docstore_path = pdf_path.replace('.pdf', '_docstore.pkl')
151
+ save_to_pickle(retriever.docstore.store, docstore_path)
152
+
153
+ return chroma_path, docstore_path
154
+
155
+
156
+ def load_retriever(docstore_path, chroma_path):
157
+ embeddings = OpenAIEmbeddings(api_key=API_KEY)
158
+ parent_splitter = RecursiveCharacterTextSplitter(chunk_size=2000, chunk_overlap=500)
159
+ child_splitter = RecursiveCharacterTextSplitter(chunk_size=300, chunk_overlap=50)
160
+
161
+ vectorstore = Chroma(collection_name="full_documents",
162
+ embedding_function=embeddings,
163
+ persist_directory=chroma_path)
164
+
165
+ store_dict = load_from_pickle(docstore_path)
166
+ store = InMemoryStore()
167
+ store.mset(list(store_dict.items()))
168
+
169
+ retriever = ParentDocumentRetriever(
170
+ vectorstore=vectorstore,
171
+ docstore=store,
172
+ child_splitter=child_splitter,
173
+ parent_splitter=parent_splitter
174
+ )
175
+
176
+ return retriever
177
+
178
+
179
+ def answer_publication_questions(doi: str, question: str):
180
+ related_doi = get_related_publication_info(doi)
181
+
182
+ if not related_doi:
183
+ return "No publications related to this dataset were found."
184
+
185
+ pdf_filename = create_pdf_filename(related_doi)
186
+ publication_database = os.path.join(os.getcwd(), 'data', 'publication_database')  # same folder used by download_pdf_from_crossref
187
+ chroma_path = os.path.join(publication_database, pdf_filename.replace(".pdf", "_chroma"))
188
+ docstore_path = os.path.join(publication_database, pdf_filename.replace(".pdf", "_docstore.pkl"))
189
+
190
+ try:
191
+ if not os.path.exists(chroma_path) or not os.path.exists(docstore_path):
192
+ pdf_path = download_pdf_from_crossref(related_doi)
193
+
194
+ if not pdf_path:
195
+ return "Unable to download the related publication PDF."
196
+
197
+ chroma_path, docstore_path = create_embeddings(pdf_path)
198
+
199
+ retriever = load_retriever(docstore_path, chroma_path)
200
+
201
+ model_name = st.session_state.get("model_name", "gpt-3.5-turbo")
202
+ if model_name == "o3-mini":
203
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
204
+ else:
205
+ llm = ChatOpenAI(api_key=API_KEY, model_name=model_name)
206
+
207
+ memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
208
+ conversation_chain = ConversationalRetrievalChain.from_llm(
209
+ llm=llm,
210
+ retriever=retriever,
211
+ memory=memory
212
+ )
213
+
214
+ response = conversation_chain({"question": question})
215
+ return response['answer']
216
+
217
+ except Exception as e:
218
+ print(f"An unexpected error occurred: {str(e)}")
219
+ return f"An error occurred while processing your request: {str(e)}"
src/search/search_pg_default.py ADDED
@@ -0,0 +1,121 @@
1
+ # search_pg_default.py
2
+ import requests
3
+ import pandas as pd
4
+ import logging
5
+ import re
6
+ from typing import List, Optional
7
+ from bs4 import BeautifulSoup
8
+ import json
9
+ import os
10
+ import pangaeapy.pandataset as pdataset
11
+ from .dataset_utils import fetch_dataset_details
12
+
13
+ # Setup logging
14
+ logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s')
15
+
16
+ # Function to check if a variable is of a specific type
17
+ def check_if(x, cls):
18
+ if x is not None and not isinstance(x, cls):
19
+ raise TypeError(f"{x} must be of class: {', '.join([str(c) for c in cls])}")
20
+
21
+ # Utility functions equivalent to R functions
22
+ def pgc(x):
23
+ return {k: v for k, v in x.items() if v is not None}
24
+
25
+ def strextract(string, pattern):
26
+ match = re.search(pattern, string)
27
+ return match.group(0) if match else None
28
+
29
+
30
+ # Function to parse the result
31
+ def parse_res(html_content):
32
+ soup = BeautifulSoup(html_content, 'html.parser')
33
+ citation_tag = soup.select_one('div.citation a')
34
+ citation = citation_tag.get_text(strip=True) if citation_tag else None
35
+
36
+ supp_tag = soup.select_one('tr:contains("Supplement to:") .content')
37
+ supp = supp_tag.get_text(strip=True) if supp_tag else None
38
+
39
+ size_tag = soup.select_one('tr:contains("Size:") .content')
40
+ size = size_tag.get_text(strip=True) if size_tag else None
41
+
42
+ size_val = strextract(size, r"[0-9]+") if size else None
43
+ meas = strextract(size, r"[A-Za-z].+") if size else None
44
+
45
+ parameters = ', '.join([tag.text for tag in soup.select('tr:contains("Parameter") .content')[:10]]) + "..." if len(
46
+ soup.select('tr:contains("Parameter") .content')) > 10 else ', '.join(
47
+ [tag.text for tag in soup.select('tr:contains("Parameter") .content')])
48
+
49
+ return {
50
+ 'size': int(size_val.replace(",", "")) if size_val else None,
51
+ 'size_measure': meas,
52
+ 'citation': citation,
53
+ 'supplement_to': supp,
54
+ 'parameters': parameters
55
+ }
56
+
57
+ # Main search function
58
+ def pg_search_default(query: str, count: int = 15, from_idx: int = 0, topic: Optional[str] = None,
59
+ mindate: Optional[str] = None, maxdate: Optional[str] = None, **kwargs) -> pd.DataFrame:
60
+ check_if(count, (int,))
61
+ check_if(topic, (str,))
62
+ check_if(mindate, (str,))
63
+ check_if(maxdate, (str,))
64
+
65
+ params = pgc({
66
+ 'q': query,
67
+ 'count': count,
68
+ 'offset': from_idx,
69
+ 'topic': topic,
70
+ 'mindate': mindate,
71
+ 'maxdate': maxdate
72
+ })
73
+
74
+ url = "https://www.pangaea.de/advanced/search.php"
75
+ logging.debug("Sending request to PANGAEA with parameters: %s", params)
76
+ response = requests.get(url, params=params, **kwargs)
77
+ response.raise_for_status()
78
+ logging.debug(f"URL: {response.url}")
79
+ logging.debug(f"Response Status Code: {response.status_code}")
80
+ results = response.json()
81
+ logging.debug("Received response from PANGAEA")
82
+
83
+ # Save the initial JSON response to transit.json
84
+ transit_json_path = os.path.join(os.getcwd(), 'transit.json')
85
+ with open(transit_json_path, 'w') as f:
86
+ json.dump(results, f, indent=4)
87
+ logging.info(f"Initial JSON response saved to {transit_json_path}")
88
+
89
+ parsed = []
90
+ for index, res in enumerate(results.get('results', [])):
91
+ html_content = res.get('html', '')
92
+ res['doi'] = f"https://doi.org/{res['URI'].replace('doi:', '')}"
93
+ parsed_res = parse_res(html_content)
94
+ res.update(parsed_res)
95
+
96
+ name = res.get('citation', 'No name available')
97
+
98
+ # Fetch detailed metadata using pangaeapy
99
+ abstract, parameters = fetch_dataset_details(res['doi'])
100
+ print(abstract, parameters)
101
+ short_description = " ".join(abstract.split()[:100]) + "..." if len(abstract.split()) > 100 else abstract
102
+
103
+ parsed.append({
104
+ 'Number': index + 1,
105
+ 'Name': name,
106
+ 'DOI': res['doi'],
107
+ 'DOI Number': res['doi'].split('/')[-1],
108
+ 'Description': abstract,
109
+ 'Short Description': short_description,
110
+ 'Score': res.get('score', 0),
111
+ 'Parameters': parameters
112
+ })
113
+
114
+ df = pd.DataFrame(parsed)
115
+
116
+ # Check if 'results['totalCount']' is an integer or a dictionary
117
+ total_hits = results.get('totalCount', 0)
118
+ df.attrs['total'] = total_hits
119
+ df.attrs['max_score'] = results.get('maxScore', None)
120
+
121
+ return df
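
A minimal sketch of a live query (this hits the public PANGAEA endpoint and fetches per-result metadata, so it is slow and needs network access; the query string is illustrative):

    results = pg_search_default("sea surface temperature Fram Strait", count=5)
    print(results[["Number", "Name", "DOI", "Score"]])
    print("total hits:", results.attrs["total"])
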
src/ui/styles.py ADDED
@@ -0,0 +1,181 @@
1
+ # Custom UI Styles
2
+ CUSTOM_UI = """
3
+ <style>
4
+ :root {
5
+ /* Main colors (lighter versions) */
6
+ --primary-teal: rgb(67, 163, 151);
7
+ --primary-teal-light: rgba(67, 163, 151, 0.1);
8
+ --neutral-gray: rgb(220, 220, 220);
9
+ --primary-blue: rgb(82, 142, 198);
10
+ --primary-blue-light: rgba(82, 142, 198, 0.1);
11
+ --dark-blue: rgb(65, 105, 145);
12
+ --cream-bg: rgba(218,6,18, 0.05);
13
+ --accent-red: rgb(235, 108, 108);
14
+ --accent-red-light: rgba(235, 108, 108, 0.1);
15
+ --white: #ffffff;
16
+ --text-dark: rgb(60, 60, 60);
17
+ }
18
+
19
+ /* Global Styles */
20
+ .stApp {
21
+ background-color: var(--white);
22
+ color: var(--text-dark);
23
+ font-family: "Roboto", sans-serif;
24
+ }
25
+
26
+ /* Headers */
27
+ h1, h2, h3 {
28
+ color: var(--dark-blue);
29
+ border-bottom: 2px solid var(--primary-teal-light);
30
+ padding-bottom: 0.2em;
31
+ background: linear-gradient(to right, var(--white) 0%, var(--primary-teal-light) 100%);
32
+ background-clip: text;
33
+ -webkit-background-clip: text;
34
+ color: var(--dark-blue); /* re-apply color since gradient would show through transparent text */
35
+ text-shadow: 0 1px 1px rgba(0,0,0,0.1);
36
+ font-weight: 600;
37
+ }
38
+
39
+ /* Chat Messages */
40
+ .stChatMessage {
41
+ background: var(--primary-blue-light);
42
+ border-left: 3px solid var(--primary-blue);
43
+ border-radius: 6px;
44
+ box-shadow: 0 1px 3px rgba(0,0,0,0.1);
45
+ padding: 0.5em;
46
+ transition: box-shadow 0.2s ease;
47
+ }
48
+ .stChatMessage:hover {
49
+ box-shadow: 0 2px 5px rgba(0,0,0,0.2);
50
+ }
51
+
52
+ /* Buttons */
53
+ .stButton > button {
54
+ background: var(--white);
55
+ color: var(--primary-teal);
56
+ border: 1px solid var(--primary-teal);
57
+ border-radius: 4px;
58
+ transition: background 0.2s ease, color 0.2s ease, box-shadow 0.2s ease;
59
+ font-weight: 500;
60
+ }
61
+ .stButton > button:hover {
62
+ background: var(--primary-teal);
63
+ color: var(--white);
64
+ box-shadow: 0 2px 5px rgba(0,0,0,0.15);
65
+ }
66
+
67
+ /* Fixed Button (if any) */
68
+ .fixed-button button {
69
+ background: var(--primary-blue);
70
+ color: var(--white);
71
+ border-radius: 4px;
72
+ }
73
+
74
+ /* Sidebar */
75
+ [data-testid=stSidebar] {
76
+ background-color: var(--cream-bg);
77
+ color: var(--text-dark);
78
+ border-right: 1px solid var(--neutral-gray);
79
+ }
80
+ [data-testid=stSidebar] .stSelectbox label {
81
+ color: var(--text-dark);
82
+ }
83
+
84
+ /* Input Fields */
85
+ .stTextInput > div > div > input {
86
+ background: var(--white);
87
+ border: 1px solid var(--neutral-gray);
88
+ border-radius: 4px;
89
+ padding: 0.4em;
90
+ transition: border-color 0.2s ease, box-shadow 0.2s ease;
91
+ }
92
+ .stTextInput > div > div > input:focus {
93
+ border-color: var(--primary-blue);
94
+ box-shadow: 0 0 0 1px var(--primary-blue-light);
95
+ }
96
+
97
+ /* Alerts and Messages */
98
+ .stAlert {
99
+ background: var(--primary-teal-light);
100
+ border-left: 3px solid var(--primary-teal);
101
+ border-radius: 4px;
102
+ padding: 0.5em;
103
+ box-shadow: 0 1px 3px rgba(0,0,0,0.1);
104
+ }
105
+ .stAlert.error {
106
+ background: var(--accent-red-light);
107
+ border-left: 3px solid var(--accent-red);
108
+ }
109
+
110
+ /* DataFrame */
111
+ .stDataFrame {
112
+ border: 1px solid var(--neutral-gray);
113
+ border-radius: 4px;
114
+ overflow: hidden;
115
+ box-shadow: 0 1px 3px rgba(0,0,0,0.1);
116
+ }
117
+
118
+ /* Expander */
119
+ .streamlit-expanderHeader {
120
+ background: var(--white);
121
+ border: 1px solid var(--neutral-gray);
122
+ border-radius: 4px;
123
+ font-weight: 500;
124
+ }
125
+ .streamlit-expanderHeader:hover {
126
+ box-shadow: 0 1px 3px rgba(0,0,0,0.1);
127
+ }
128
+
129
+ /* Checkbox */
130
+ .stCheckbox > label > div[role="checkbox"] {
131
+ border-color: var(--primary-teal);
132
+ border-radius: 3px;
133
+ transition: background 0.2s ease;
134
+ }
135
+ .stCheckbox > label > div[role="checkbox"][aria-checked="true"] {
136
+ background-color: var(--primary-teal);
137
+ }
138
+
139
+ /* Search bar */
140
+ .stSearchInput > div > div > input {
141
+ border: 1px solid var(--primary-blue);
142
+ border-radius: 4px;
143
+ }
144
+ .stSearchInput > div > div > input:focus {
145
+ border-color: var(--primary-teal);
146
+ box-shadow: 0 0 0 1px var(--primary-teal-light);
147
+ }
148
+
149
+ /* Secondary buttons */
150
+ [data-testid="stButton"] > button[kind="secondary"] {
151
+ background: var(--white);
152
+ color: var(--primary-blue);
153
+ border: 1px solid var(--primary-blue);
154
+ border-radius: 4px;
155
+ transition: background 0.2s ease, color 0.2s ease, box-shadow 0.2s ease;
156
+ }
157
+ [data-testid="stButton"] > button[kind="secondary"]:hover {
158
+ background: var(--primary-blue);
159
+ color: var(--white);
160
+ box-shadow: 0 2px 5px rgba(0,0,0,0.15);
161
+ }
162
+
163
+ /* Delete or cancel actions */
164
+ .delete-button > button {
165
+ color: var(--accent-red);
166
+ border-color: var(--accent-red);
167
+ border-radius: 4px;
168
+ transition: background 0.2s ease, color 0.2s ease;
169
+ }
170
+ .delete-button > button:hover {
171
+ background: var(--accent-red);
172
+ color: var(--white);
173
+ }
174
+ </style>
175
+ """
176
+
177
+
178
+
179
+ # Constants
180
+ SYSTEM_ICON = "img/11111111.png"
181
+ USER_ICON = "img/2222222.png"
src/utils.py ADDED
@@ -0,0 +1,64 @@
1
+ # src/utils.py
2
+
3
+
4
+ import os
5
+ import uuid
6
+ import re
7
+ import logging
8
+ import pandas as pd
9
+ import matplotlib.pyplot as plt
10
+ import streamlit as st
11
+ import time
12
+ import json
13
+
14
+ # Generate a unique image path for saving plots
15
+ def generate_unique_image_path():
16
+ figs_dir = os.path.join('tmp', 'figs')
17
+ os.makedirs(figs_dir, exist_ok=True)
18
+ unique_filename = f'fig_{uuid.uuid4()}.png'
19
+ unique_path = os.path.join(figs_dir, unique_filename)
20
+ logging.debug(f"Generated unique image path: {unique_path}")
21
+ return unique_path
22
+
23
+
24
+ # Function to sanitize input
25
+ def sanitize_input(query: str) -> str:
26
+ return query.strip()
27
+
28
+ # Define the function to extract the last Python REPL command
29
+ def get_last_python_repl_command():
30
+ import streamlit as st # Ensure Streamlit is imported
31
+ if 'intermediate_steps' not in st.session_state:
32
+ logging.warning("No intermediate steps found in session state.")
33
+ return None
34
+
35
+ intermediate_steps = st.session_state['intermediate_steps']
36
+ python_repl_commands = []
37
+ for step in intermediate_steps:
38
+ action = step[0]
39
+ observation = step[1]
40
+ if action.get('tool') == 'Python_REPL':
41
+ python_repl_commands.append(action)
42
+
43
+ if python_repl_commands:
44
+ last_command_action = python_repl_commands[-1]
45
+ command = last_command_action.get('tool_input', '')
46
+ logging.debug(f"Extracted last Python REPL command: {command}")
47
+ return command
48
+ else:
49
+ logging.warning("No Python_REPL commands found in intermediate steps.")
50
+ return None
51
+
52
+
53
+ def log_history_event(session_data: dict, event_type: str, details: dict):
54
+ if "execution_history" not in session_data:
55
+ session_data["execution_history"] = [] # fallback
56
+
57
+ timestamp = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
58
+ event = {
59
+ "type": event_type,
60
+ "timestamp": timestamp
61
+ }
62
+ event.update(details) # merges in content from details
63
+
64
+ session_data["execution_history"].append(event)
tmp/.DS_Store ADDED
Binary file (6.15 kB). View file
 
tmp/.gitkeep ADDED
File without changes
tmp/figs/.DS_Store ADDED
Binary file (6.15 kB). View file