Humanlearning committed on
Commit
c9bfbc5
·
1 Parent(s): 3708220

+ requirements update

Browse files
Files changed (5) hide show
  1. .gitignore +3 -1
  2. agents/lead_agent.py +74 -8
  3. pyproject.toml +2 -0
  4. requirements.txt +23 -4
  5. uv.lock +4 -0
.gitignore CHANGED
@@ -2,4 +2,6 @@ env*
2
  .env*
3
  review.csv
4
  *.csv
5
- *.cpython
 
 
 
2
  .env*
3
  review.csv
4
  *.csv
5
+ *.cpython*
6
+ *.pyc
7
+ *__pycache__*
agents/lead_agent.py CHANGED
@@ -78,14 +78,80 @@ def lead_agent(state: Dict[str, Any]) -> Command[Literal["research", "code", "fo
78
  # Check for termination conditions first
79
  if loop_counter >= max_iterations:
80
  print("🔄 Maximum iterations reached, proceeding to formatter")
81
- return Command(
82
- goto="formatter",
83
- update={
84
- "loop_counter": loop_counter + 1,
85
- "next": "formatter"
86
- }
87
- )
88
-
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
89
  try:
90
  # Get the system prompt
91
  system_prompt = load_system_prompt()
 
78
  # Check for termination conditions first
79
  if loop_counter >= max_iterations:
80
  print("🔄 Maximum iterations reached, proceeding to formatter")
81
+
82
+ # Create draft answer even when max iterations reached
83
+ research_notes = state.get("research_notes", "")
84
+ code_outputs = state.get("code_outputs", "")
85
+ messages = state.get("messages", [])
86
+ user_query = ""
87
+ for msg in messages:
88
+ if isinstance(msg, HumanMessage):
89
+ user_query = msg.content
90
+ break
91
+
92
+ # Create a comprehensive draft answer from gathered information
93
+ draft_prompt = f"""
94
+ Create a comprehensive answer based on all gathered information:
95
+
96
+ Original Question: {user_query}
97
+
98
+ Research Information:
99
+ {research_notes}
100
+
101
+ Code Results:
102
+ {code_outputs}
103
+
104
+ Instructions:
105
+ 1. Synthesize all available information to answer the question
106
+ 2. If computational results are available, include them
107
+ 3. If research provides context, incorporate it
108
+ 4. Provide a clear, direct answer to the user's question
109
+ 5. Focus on accuracy and completeness
110
+
111
+ What is your answer to the user's question?
112
+ """
113
+
114
+ try:
115
+ # Initialize LLM for draft creation
116
+ llm = ChatGroq(
117
+ model="llama-3.3-70b-versatile",
118
+ temperature=0.1,
119
+ max_tokens=1024
120
+ )
121
+
122
+ system_prompt = load_system_prompt()
123
+ draft_messages = [
124
+ SystemMessage(content=system_prompt),
125
+ HumanMessage(content=draft_prompt)
126
+ ]
127
+
128
+ draft_response = llm.invoke(draft_messages)
129
+ draft_content = draft_response.content if hasattr(draft_response, 'content') else str(draft_response)
130
+ print(f"📝 Lead Agent: Created draft answer at max iterations ({len(draft_content)} characters)")
131
+
132
+ return Command(
133
+ goto="formatter",
134
+ update={
135
+ "loop_counter": loop_counter + 1,
136
+ "next": "formatter",
137
+ "draft_answer": draft_content
138
+ }
139
+ )
140
+
141
+ except Exception as e:
142
+ print(f"⚠️ Error creating draft answer at max iterations: {e}")
143
+ # Fallback - create a simple answer from available data
144
+ fallback_answer = f"Based on the available information:\n\nResearch: {research_notes}\nCalculations: {code_outputs}"
145
+
146
+ return Command(
147
+ goto="formatter",
148
+ update={
149
+ "loop_counter": loop_counter + 1,
150
+ "next": "formatter",
151
+ "draft_answer": fallback_answer
152
+ }
153
+ )
154
+
155
  try:
156
  # Get the system prompt
157
  system_prompt = load_system_prompt()
pyproject.toml CHANGED
@@ -38,4 +38,6 @@ dependencies = [
38
  "arxiv>=2.2.0",
39
  "langchain-tavily>=0.2.4",
40
  "python-dotenv>=1.1.0",
 
 
41
  ]
 
38
  "arxiv>=2.2.0",
39
  "langchain-tavily>=0.2.4",
40
  "python-dotenv>=1.1.0",
41
+ "langchain-core>=0.3.65",
42
+ "pydantic>=2.11.5",
43
  ]
requirements.txt CHANGED
@@ -8,6 +8,7 @@ aiohttp==3.12.9
8
  # via
9
  # fsspec
10
  # langchain-community
 
11
  # llama-index-core
12
  # realtime
13
  aiosignal==1.3.2
@@ -26,7 +27,9 @@ anyio==4.9.0
26
  # openai
27
  # starlette
28
  arxiv==2.2.0
29
- # via llama-index-readers-papers
 
 
30
  asttokens==3.0.0
31
  # via stack-data
32
  async-timeout==4.0.3
@@ -266,16 +269,19 @@ langchain==0.3.25
266
  # via
267
  # final-assignment-template (pyproject.toml)
268
  # langchain-community
 
269
  langchain-community==0.3.25
270
  # via final-assignment-template (pyproject.toml)
271
  langchain-core==0.3.65
272
  # via
 
273
  # langchain
274
  # langchain-community
275
  # langchain-google-genai
276
  # langchain-groq
277
  # langchain-huggingface
278
  # langchain-openai
 
279
  # langchain-text-splitters
280
  # langgraph
281
  # langgraph-checkpoint
@@ -288,6 +294,8 @@ langchain-huggingface==0.3.0
288
  # via final-assignment-template (pyproject.toml)
289
  langchain-openai==0.3.24
290
  # via final-assignment-template (pyproject.toml)
 
 
291
  langchain-text-splitters==0.3.8
292
  # via langchain
293
  langfuse==3.0.0
@@ -405,8 +413,12 @@ multidict==6.4.4
405
  # yarl
406
  multiprocess==0.70.16
407
  # via datasets
 
 
408
  mypy-extensions==1.1.0
409
- # via typing-inspect
 
 
410
  nest-asyncio==1.6.0
411
  # via
412
  # ipykernel
@@ -496,6 +508,8 @@ pandas==2.2.3
496
  # llama-index-readers-file
497
  parso==0.8.4
498
  # via jedi
 
 
499
  pillow==11.2.1
500
  # via
501
  # gradio
@@ -544,6 +558,7 @@ pyasn1-modules==0.4.2
544
  # via google-auth
545
  pydantic==2.11.5
546
  # via
 
547
  # banks
548
  # fastapi
549
  # gotrue
@@ -588,6 +603,7 @@ python-dateutil==2.9.0.post0
588
  # storage3
589
  python-dotenv==1.1.0
590
  # via
 
591
  # dotenv
592
  # llama-cloud-services
593
  # pydantic-settings
@@ -595,7 +611,6 @@ python-multipart==0.0.20
595
  # via gradio
596
  pytz==2025.2
597
  # via pandas
598
- # pywin32==310
599
  # via jupyter-core
600
  pyyaml==6.0.2
601
  # via
@@ -626,6 +641,7 @@ requests==2.32.3
626
  # huggingface-hub
627
  # langchain
628
  # langchain-community
 
629
  # langfuse
630
  # langsmith
631
  # llama-index-core
@@ -718,7 +734,9 @@ tokenizers==0.21.1
718
  # langchain-huggingface
719
  # transformers
720
  tomli==2.2.1
721
- # via pytest
 
 
722
  tomlkit==0.13.3
723
  # via gradio
724
  torch==2.7.1
@@ -764,6 +782,7 @@ typing-extensions==4.14.0
764
  # langchain-core
765
  # llama-index-core
766
  # multidict
 
767
  # openai
768
  # opentelemetry-api
769
  # opentelemetry-exporter-otlp-proto-grpc
 
8
  # via
9
  # fsspec
10
  # langchain-community
11
+ # langchain-tavily
12
  # llama-index-core
13
  # realtime
14
  aiosignal==1.3.2
 
27
  # openai
28
  # starlette
29
  arxiv==2.2.0
30
+ # via
31
+ # final-assignment-template (pyproject.toml)
32
+ # llama-index-readers-papers
33
  asttokens==3.0.0
34
  # via stack-data
35
  async-timeout==4.0.3
 
269
  # via
270
  # final-assignment-template (pyproject.toml)
271
  # langchain-community
272
+ # langchain-tavily
273
  langchain-community==0.3.25
274
  # via final-assignment-template (pyproject.toml)
275
  langchain-core==0.3.65
276
  # via
277
+ # final-assignment-template (pyproject.toml)
278
  # langchain
279
  # langchain-community
280
  # langchain-google-genai
281
  # langchain-groq
282
  # langchain-huggingface
283
  # langchain-openai
284
+ # langchain-tavily
285
  # langchain-text-splitters
286
  # langgraph
287
  # langgraph-checkpoint
 
294
  # via final-assignment-template (pyproject.toml)
295
  langchain-openai==0.3.24
296
  # via final-assignment-template (pyproject.toml)
297
+ langchain-tavily==0.2.4
298
+ # via final-assignment-template (pyproject.toml)
299
  langchain-text-splitters==0.3.8
300
  # via langchain
301
  langfuse==3.0.0
 
413
  # yarl
414
  multiprocess==0.70.16
415
  # via datasets
416
+ mypy==1.16.1
417
+ # via langchain-tavily
418
  mypy-extensions==1.1.0
419
+ # via
420
+ # mypy
421
+ # typing-inspect
422
  nest-asyncio==1.6.0
423
  # via
424
  # ipykernel
 
508
  # llama-index-readers-file
509
  parso==0.8.4
510
  # via jedi
511
+ pathspec==0.12.1
512
+ # via mypy
513
  pillow==11.2.1
514
  # via
515
  # gradio
 
558
  # via google-auth
559
  pydantic==2.11.5
560
  # via
561
+ # final-assignment-template (pyproject.toml)
562
  # banks
563
  # fastapi
564
  # gotrue
 
603
  # storage3
604
  python-dotenv==1.1.0
605
  # via
606
+ # final-assignment-template (pyproject.toml)
607
  # dotenv
608
  # llama-cloud-services
609
  # pydantic-settings
 
611
  # via gradio
612
  pytz==2025.2
613
  # via pandas
 
614
  # via jupyter-core
615
  pyyaml==6.0.2
616
  # via
 
641
  # huggingface-hub
642
  # langchain
643
  # langchain-community
644
+ # langchain-tavily
645
  # langfuse
646
  # langsmith
647
  # llama-index-core
 
734
  # langchain-huggingface
735
  # transformers
736
  tomli==2.2.1
737
+ # via
738
+ # mypy
739
+ # pytest
740
  tomlkit==0.13.3
741
  # via gradio
742
  torch==2.7.1
 
782
  # langchain-core
783
  # llama-index-core
784
  # multidict
785
+ # mypy
786
  # openai
787
  # opentelemetry-api
788
  # opentelemetry-exporter-otlp-proto-grpc
uv.lock CHANGED
@@ -538,6 +538,7 @@ dependencies = [
538
  { name = "ipywidgets" },
539
  { name = "langchain" },
540
  { name = "langchain-community" },
 
541
  { name = "langchain-google-genai" },
542
  { name = "langchain-groq" },
543
  { name = "langchain-huggingface" },
@@ -556,6 +557,7 @@ dependencies = [
556
  { name = "llama-index-tools-tavily-research" },
557
  { name = "opencv-python" },
558
  { name = "pandas" },
 
559
  { name = "python-dotenv" },
560
  { name = "rich" },
561
  { name = "sentence-transformers" },
@@ -575,6 +577,7 @@ requires-dist = [
575
  { name = "ipywidgets", specifier = ">=8.1.7" },
576
  { name = "langchain", specifier = ">=0.3.25" },
577
  { name = "langchain-community", specifier = ">=0.3.25" },
 
578
  { name = "langchain-google-genai", specifier = ">=2.1.5" },
579
  { name = "langchain-groq", specifier = ">=0.3.2" },
580
  { name = "langchain-huggingface", specifier = ">=0.3.0" },
@@ -593,6 +596,7 @@ requires-dist = [
593
  { name = "llama-index-tools-tavily-research", specifier = ">=0.3.0" },
594
  { name = "opencv-python", specifier = ">=4.11.0.86" },
595
  { name = "pandas", specifier = ">=2.2.3" },
 
596
  { name = "python-dotenv", specifier = ">=1.1.0" },
597
  { name = "rich", specifier = ">=14.0.0" },
598
  { name = "sentence-transformers", specifier = ">=4.1.0" },
 
538
  { name = "ipywidgets" },
539
  { name = "langchain" },
540
  { name = "langchain-community" },
541
+ { name = "langchain-core" },
542
  { name = "langchain-google-genai" },
543
  { name = "langchain-groq" },
544
  { name = "langchain-huggingface" },
 
557
  { name = "llama-index-tools-tavily-research" },
558
  { name = "opencv-python" },
559
  { name = "pandas" },
560
+ { name = "pydantic" },
561
  { name = "python-dotenv" },
562
  { name = "rich" },
563
  { name = "sentence-transformers" },
 
577
  { name = "ipywidgets", specifier = ">=8.1.7" },
578
  { name = "langchain", specifier = ">=0.3.25" },
579
  { name = "langchain-community", specifier = ">=0.3.25" },
580
+ { name = "langchain-core", specifier = ">=0.3.65" },
581
  { name = "langchain-google-genai", specifier = ">=2.1.5" },
582
  { name = "langchain-groq", specifier = ">=0.3.2" },
583
  { name = "langchain-huggingface", specifier = ">=0.3.0" },
 
596
  { name = "llama-index-tools-tavily-research", specifier = ">=0.3.0" },
597
  { name = "opencv-python", specifier = ">=4.11.0.86" },
598
  { name = "pandas", specifier = ">=2.2.3" },
599
+ { name = "pydantic", specifier = ">=2.11.5" },
600
  { name = "python-dotenv", specifier = ">=1.1.0" },
601
  { name = "rich", specifier = ">=14.0.0" },
602
  { name = "sentence-transformers", specifier = ">=4.1.0" },