Psiska committed on
Commit
3d2943f
·
1 Parent(s): 9a0136d

Added memory tool

Browse files
__pycache__/crew.cpython-310.pyc CHANGED
Binary files a/__pycache__/crew.cpython-310.pyc and b/__pycache__/crew.cpython-310.pyc differ
 
app.py CHANGED
@@ -2,6 +2,9 @@ import os, threading
2
  import gradio as gr
3
  from crew import run_crew
4
  from utils import get_questions
 
 
 
5
 
6
  def ask(question, openai_api_key, gemini_api_key, anthropic_api_key, file_name = ""):
7
  """
@@ -152,3 +155,4 @@ with gr.Blocks() as grady:
152
  grady.launch(mcp_server=True)
153
 
154
 
 
 
2
  import gradio as gr
3
  from crew import run_crew
4
  from utils import get_questions
5
+ from tools.memory_tools import memory_tool
6
+
7
+
8
 
9
  def ask(question, openai_api_key, gemini_api_key, anthropic_api_key, file_name = ""):
10
  """
 
155
  grady.launch(mcp_server=True)
156
 
157
 
158
+
crew.py CHANGED
@@ -4,6 +4,7 @@
4
  # https://ai.google.dev/gemini-api/docs
5
 
6
  import os
 
7
  from crewai import Agent, Crew, Task
8
  from crewai.agents.agent_builder.base_agent import BaseAgent
9
  from crewai.project import CrewBase, agent, crew, task
@@ -150,26 +151,46 @@ class GAIACrew():
150
 
151
 
152
  def run_crew(question, file_path):
 
153
  final_question = question
154
-
155
  if file_path:
156
- if is_ext(file_path, ".csv") or is_ext(file_path, ".xls") or is_ext(file_path, ".xlsx") or is_ext(file_path, ".json") or is_ext(file_path, ".jsonl"):
 
 
157
  json_data = read_file_json(file_path)
158
  final_question = f"{question} JSON data:\n{json_data}."
159
  else:
160
  final_question = f"{question} File path: {file_path}."
161
 
 
 
 
 
 
 
 
 
 
 
 
162
  crew_instance = GAIACrew()
163
- answer = crew_instance.get_crew().kickoff(inputs={"question": final_question})
164
- final_answer = get_final_answer(FINAL_ANSWER_MODEL, question, str(answer))
 
 
 
 
 
 
 
 
 
 
 
165
 
166
- print(f"=> Initial question: {question}")
167
- print(f"=> Final question: {final_question}")
168
- print(f"=> Initial answer: {answer}")
169
- print(f"=> Final answer: {final_answer}")
170
-
171
  return final_answer
172
 
 
173
  def get_final_answer(model, question, answer):
174
  prompt_template = """
175
  You are an expert question answering assistant. Given a question and an initial answer, your task is to provide the final answer.
 
4
  # https://ai.google.dev/gemini-api/docs
5
 
6
  import os
7
+ from tools.memory_tools import memory_tool
8
  from crewai import Agent, Crew, Task
9
  from crewai.agents.agent_builder.base_agent import BaseAgent
10
  from crewai.project import CrewBase, agent, crew, task
 
151
 
152
 
153
def run_crew(question, file_path):
    """
    Answer `question` using the multi-agent Crew, enriching the prompt with
    optional file contents and with similar past questions recalled from the
    memory tool.

    Args:
        question: The raw user question.
        file_path: Optional path to an attachment; tabular/JSON-like files are
            inlined as JSON, anything else is referenced by path.

    Returns:
        The post-processed final answer string.
    """
    # 0) Prepend file data if needed
    final_question = question
    if file_path:
        # Tabular / JSON-like attachments get inlined; others are passed by path.
        inline_as_json = any(
            is_ext(file_path, ext)
            for ext in (".csv", ".xls", ".xlsx", ".json", ".jsonl")
        )
        if inline_as_json:
            json_data = read_file_json(file_path)
            final_question = f"{question} JSON data:\n{json_data}."
        else:
            final_question = f"{question} File path: {file_path}."

    # 1) Recall relevant history from memory.
    history = memory_tool.run("load", question)
    # Defensive: tolerate implementations that return a list of messages.
    if isinstance(history, list):
        history = "\n".join(history)

    # 2) Build the prompt for the Crew: recalled history (if any) + question.
    full_input = (history + "\n" if history else "") + final_question

    # 3) Run the multi-agent Crew on the assembled prompt.
    raw_answer = GAIACrew().get_crew().kickoff(inputs={"question": full_input})

    # 4) Persist only the user's question for future recall.
    memory_tool.run("save", question)

    # 5) Distill the raw crew output into the final answer.
    final_answer = get_final_answer(FINAL_ANSWER_MODEL, question, str(raw_answer))

    # 6) Debug traces.
    print(f"=> History:\n{history}")
    print(f"=> Prompt sent:\n{full_input}")
    print(f"=> Raw answer:\n{raw_answer}")
    print(f"=> Final answer:\n{final_answer}")

    return final_answer
192
 
193
+
194
  def get_final_answer(model, question, answer):
195
  prompt_template = """
196
  You are an expert question answering assistant. Given a question and an initial answer, your task is to provide the final answer.
faiss_index/index.faiss ADDED
Binary file (30.8 kB). View file
 
faiss_index/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:04bd4a34c4fb5c05840d6d26afa952c49bad6fd9e93951096a45584c4ad69dbb
3
+ size 893
requirements.txt CHANGED
@@ -1,10 +1,13 @@
1
  arize-phoenix-otel==0.10.1
2
- crewai==0.121.1
3
- crewai[tools]==0.121.1
4
- crewai-tools==0.45.0
5
  google-genai==1.13.0
6
  gradio[mcp]==5.31.0
7
  openinference-instrumentation-crewai==0.1.10
8
  python-docx==1.1.2
9
  python-pptx==1.0.2
10
- stagehand-py==0.3.10
 
 
 
 
 
1
  arize-phoenix-otel==0.10.1
2
+ crewai
3
+ crewai-tools
 
4
  google-genai==1.13.0
5
  gradio[mcp]==5.31.0
6
  openinference-instrumentation-crewai==0.1.10
7
  python-docx==1.1.2
8
  python-pptx==1.0.2
9
+ stagehand-py==0.3.10
10
+ langchain
11
+ redis==4.5.5 # if you choose Redis for persistence
12
+ python-dotenv # to load REDIS_URL from .env
13
+ faiss-cpu
tools/__pycache__/ai_tools.cpython-310.pyc CHANGED
Binary files a/tools/__pycache__/ai_tools.cpython-310.pyc and b/tools/__pycache__/ai_tools.cpython-310.pyc differ
 
tools/__pycache__/memory_tools.cpython-310.pyc ADDED
Binary file (2.28 kB). View file
 
tools/ai_tools.py CHANGED
@@ -1,5 +1,6 @@
1
  import os
2
 
 
3
  from crewai.tools import tool
4
  from crewai_tools import StagehandTool
5
  from google import genai
 
1
  import os
2
 
3
+ from tools.memory_tools import memory_tool
4
  from crewai.tools import tool
5
  from crewai_tools import StagehandTool
6
  from google import genai
tools/memory_tools.py ADDED
@@ -0,0 +1,91 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # tools/memory_tools.py
2
+
3
+ import os
4
+ from crewai.tools import tool
5
+ from langchain.schema import Document
6
+ from langchain.embeddings import OpenAIEmbeddings
7
+ from langchain_community.vectorstores import FAISS # faiss-cpu must be installed
8
+
9
+ INDEX_DIR = "faiss_index"
10
+ _embeddings = None # will hold our OpenAIEmbeddings
11
+ _vectorstore = None # will hold our FAISS instance
12
+
13
+ def _init_embeddings():
14
+ global _embeddings
15
+ if _embeddings is None:
16
+ _embeddings = OpenAIEmbeddings(
17
+ openai_api_key=os.environ.get("OPENAI_API_KEY", "")
18
+ )
19
+ return _embeddings
20
+
21
def _load_or_build_index(doc: Document = None):
    """
    Return the FAISS vectorstore, resolved in this order of precedence:

    1. A cached in-memory index whose on-disk folder was deleted is dropped,
       forcing a rebuild.
    2. An (otherwise valid) cached in-memory index is reused.
    3. A persisted index found in INDEX_DIR is loaded.
    4. When a single `doc` is supplied, a fresh index is created from it
       and persisted.
    5. Otherwise None is returned (no index, nothing to build from).
    """
    global _vectorstore
    embedder = _init_embeddings()
    on_disk = os.path.isdir(INDEX_DIR)

    # 1) Cached index whose backing folder vanished -> invalidate the cache.
    if _vectorstore is not None and not on_disk:
        _vectorstore = None

    # 2) Cached and still valid -> reuse it.
    if _vectorstore is not None:
        return _vectorstore

    # 3) Persisted index available -> load it from disk.
    if on_disk:
        _vectorstore = FAISS.load_local(
            INDEX_DIR, embedder, allow_dangerous_deserialization=True
        )
        return _vectorstore

    # 4) First document supplied -> seed a brand-new index and persist it.
    if doc is not None:
        _vectorstore = FAISS.from_documents([doc], embedder)
        _vectorstore.save_local(INDEX_DIR)
        return _vectorstore

    # 5) Nothing to return.
    return None
54
+
55
@tool("Memory Tool")
def memory_tool(action: str, text: str) -> str:
    """
    Save a user message to, or recall similar messages from, the
    FAISS-backed long-term memory index.

    action: "save" to store the user message, or "load" to retrieve similar past messages.
    text: the message to save, or the query for load.

    Returns:
        - on "save": "Saved"
        - on "load": up to 3 similar messages joined by newline, or "" if none
        - otherwise: "Invalid action"
    """
    act = action.strip().lower()

    if act == "save":
        doc = Document(page_content=text)

        # Probe for an existing index WITHOUT passing the doc, so nothing is
        # inserted as a side effect of the lookup.
        vs = _load_or_build_index()

        if vs is None:
            # First ever save: seed a brand-new index with this doc.
            # _load_or_build_index persists it, so no further work is needed.
            # (BUGFIX: the previous code passed `doc` on the first call and
            # then add_documents()'d it again, storing the first message twice.)
            _load_or_build_index(doc)
        else:
            # Existing index: append the doc exactly once and persist.
            vs.add_documents([doc])
            vs.save_local(INDEX_DIR)

        return "Saved"

    elif act == "load":
        vs = _load_or_build_index()
        if not vs:
            return ""  # no history yet

        # Recall the 3 most similar past messages.
        hits = vs.similarity_search(text, k=3)
        return "\n".join(d.page_content for d in hits)

    else:
        return "Invalid action"