Naveen-2007 committed on
Commit
c17f43c
·
1 Parent(s): 2ffb9df

Fix: Groq model, error handling for all modes, LITE_MODE fallbacks

Browse files
Files changed (3) hide show
  1. app/api.py +80 -34
  2. config/config.py +4 -10
  3. render.yaml +1 -1
app/api.py CHANGED
@@ -487,13 +487,18 @@ def deep_research(req: ChatRequest):
487
  memory.add(ws, "user", q)
488
 
489
  try:
490
- state = deep_graph.run(q)
491
- # state is a dict (TypedDict), not an object
492
- answer = state.get("final_answer", "No answer generated.")
493
- sources = state.get("sources", [])
 
 
 
 
 
494
  except Exception as e:
495
- print("Deep research error:", e)
496
- answer = "Something went wrong in deep research mode."
497
  sources = []
498
 
499
  memory.add(ws, "assistant", answer)
@@ -752,13 +757,20 @@ def analyze_mode(req: ModeRequest):
752
 
753
  memory.add(ws, "user", q)
754
 
755
- # Run the AnalysisGraph pipeline
756
- state = analysis_graph.run(q)
757
-
758
- answer = state.get("answer", "No analysis generated.")
759
- sources = state.get("sources", [])
760
- links = state.get("links", [])
761
- follow = state.get("followups", [])
 
 
 
 
 
 
 
762
 
763
  # Get related images
764
  images = tavily_images_safe(q)
@@ -916,13 +928,20 @@ def web_search_mode(req: ModeRequest):
916
 
917
  memory.add(ws, "user", q)
918
 
919
- # Run the WebSearchGraph pipeline
920
- state = web_graph.run(q)
921
-
922
- answer = state.get("answer", "No answer generated.")
923
- sources = state.get("sources", [])
924
- links = state.get("links", [])
925
- follow = state.get("followups", [])
 
 
 
 
 
 
 
926
 
927
  # Get images separately
928
  images = tavily_images_safe(q)
@@ -951,12 +970,23 @@ def rag_mode(req: ModeRequest):
951
 
952
  memory.add(ws, "user", q)
953
 
954
- # Run the RAGOnlyGraph pipeline
955
- state = rag_graph.run(q, ws)
956
-
957
- answer = state.get("answer", "No answer generated.")
958
- sources = state.get("sources", [])
959
- follow = state.get("followups", [])
 
 
 
 
 
 
 
 
 
 
 
960
 
961
  memory.add(ws, "assistant", answer)
962
 
@@ -983,14 +1013,30 @@ def agentic_mode(req: ModeRequest):
983
  memory.add(ws, "user", q)
984
  print(f"\n🤖 AGENTIC MODE (LangGraph): {q}")
985
 
986
- # Run the AgenticRAGGraph pipeline
987
- state = agentic_graph.run(q, ws)
988
-
989
- answer = state.get("answer", "No answer generated.")
990
- sources = state.get("sources", [])
991
- links = state.get("links", [])
992
- images = state.get("images", [])
993
- follow = state.get("followups", [])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
994
 
995
  memory.add(ws, "assistant", answer)
996
  print(f" ✅ AgenticGraph: Completed with {len(sources)} sources")
 
487
  memory.add(ws, "user", q)
488
 
489
  try:
490
+ if deep_graph is None:
491
+ # LITE_MODE fallback - use web search instead
492
+ state = web_graph.run(q)
493
+ answer = state.get("answer", "No answer generated.")
494
+ sources = state.get("sources", [])
495
+ else:
496
+ state = deep_graph.run(q)
497
+ answer = state.get("final_answer", "No answer generated.")
498
+ sources = state.get("sources", [])
499
  except Exception as e:
500
+ print(f"Deep research error: {e}")
501
+ answer = f"Deep research encountered an error. Please try again."
502
  sources = []
503
 
504
  memory.add(ws, "assistant", answer)
 
757
 
758
  memory.add(ws, "user", q)
759
 
760
+ try:
761
+ # Run the AnalysisGraph pipeline
762
+ state = analysis_graph.run(q)
763
+
764
+ answer = state.get("answer", "No analysis generated.")
765
+ sources = state.get("sources", [])
766
+ links = state.get("links", [])
767
+ follow = state.get("followups", [])
768
+ except Exception as e:
769
+ print(f"Analysis error: {e}")
770
+ answer = f"Analysis encountered an error: {str(e)[:100]}"
771
+ sources = []
772
+ links = []
773
+ follow = []
774
 
775
  # Get related images
776
  images = tavily_images_safe(q)
 
928
 
929
  memory.add(ws, "user", q)
930
 
931
+ try:
932
+ # Run the WebSearchGraph pipeline
933
+ state = web_graph.run(q)
934
+
935
+ answer = state.get("answer", "No answer generated.")
936
+ sources = state.get("sources", [])
937
+ links = state.get("links", [])
938
+ follow = state.get("followups", [])
939
+ except Exception as e:
940
+ print(f"Web search error: {e}")
941
+ answer = f"Web search encountered an error: {str(e)[:100]}"
942
+ sources = []
943
+ links = []
944
+ follow = []
945
 
946
  # Get images separately
947
  images = tavily_images_safe(q)
 
970
 
971
  memory.add(ws, "user", q)
972
 
973
+ try:
974
+ if rag_graph is None:
975
+ # LITE_MODE fallback
976
+ answer = "RAG mode requires document uploads. In lite mode, please use Web Search instead."
977
+ sources = []
978
+ follow = []
979
+ else:
980
+ # Run the RAGOnlyGraph pipeline
981
+ state = rag_graph.run(q, ws)
982
+ answer = state.get("answer", "No answer generated.")
983
+ sources = state.get("sources", [])
984
+ follow = state.get("followups", [])
985
+ except Exception as e:
986
+ print(f"RAG error: {e}")
987
+ answer = f"RAG mode encountered an error: {str(e)[:100]}"
988
+ sources = []
989
+ follow = []
990
 
991
  memory.add(ws, "assistant", answer)
992
 
 
1013
  memory.add(ws, "user", q)
1014
  print(f"\n🤖 AGENTIC MODE (LangGraph): {q}")
1015
 
1016
+ try:
1017
+ if agentic_graph is None:
1018
+ # LITE_MODE fallback - use web search
1019
+ state = web_graph.run(q)
1020
+ answer = state.get("answer", "No answer generated.")
1021
+ sources = state.get("sources", [])
1022
+ links = state.get("links", [])
1023
+ images = tavily_images_safe(q)
1024
+ follow = state.get("followups", [])
1025
+ else:
1026
+ # Run the AgenticRAGGraph pipeline
1027
+ state = agentic_graph.run(q, ws)
1028
+ answer = state.get("answer", "No answer generated.")
1029
+ sources = state.get("sources", [])
1030
+ links = state.get("links", [])
1031
+ images = state.get("images", [])
1032
+ follow = state.get("followups", [])
1033
+ except Exception as e:
1034
+ print(f"Agentic error: {e}")
1035
+ answer = f"Agentic mode encountered an error: {str(e)[:100]}"
1036
+ sources = []
1037
+ links = []
1038
+ images = []
1039
+ follow = []
1040
 
1041
  memory.add(ws, "assistant", answer)
1042
  print(f" ✅ AgenticGraph: Completed with {len(sources)} sources")
config/config.py CHANGED
@@ -8,8 +8,8 @@ class Config:
8
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
9
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
10
 
11
- # Use lighter model for free tier
12
- LLM_MODEL = os.getenv("LLM_MODEL", "openai/gpt-oss-20b")
13
 
14
  CHUNK_SIZE = 400
15
  CHUNK_OVERLAP = 80
@@ -19,19 +19,13 @@ class Config:
19
 
20
  @classmethod
21
  def get_llm(cls):
22
- """Return chat LLM instance with tool calling disabled."""
23
  if not cls.GROQ_API_KEY:
24
  raise RuntimeError("GROQ_API_KEY missing in .env")
25
- llm = ChatGroq(
26
  groq_api_key=cls.GROQ_API_KEY,
27
  model_name=cls.LLM_MODEL,
28
  temperature=0.7
29
  )
30
- # Disable tool calling by binding empty tools list
31
- try:
32
- return llm.bind(tools=[])
33
- except:
34
- # Fallback if bind doesn't work
35
- return llm
36
 
37
 
 
8
  GROQ_API_KEY = os.getenv("GROQ_API_KEY")
9
  TAVILY_API_KEY = os.getenv("TAVILY_API_KEY")
10
 
11
+ # Use valid Groq model - llama3-8b-8192 is fast and free
12
+ LLM_MODEL = os.getenv("LLM_MODEL", "llama3-8b-8192")
13
 
14
  CHUNK_SIZE = 400
15
  CHUNK_OVERLAP = 80
 
19
 
20
  @classmethod
21
  def get_llm(cls):
22
+ """Return chat LLM instance."""
23
  if not cls.GROQ_API_KEY:
24
  raise RuntimeError("GROQ_API_KEY missing in .env")
25
+ return ChatGroq(
26
  groq_api_key=cls.GROQ_API_KEY,
27
  model_name=cls.LLM_MODEL,
28
  temperature=0.7
29
  )
 
 
 
 
 
 
30
 
31
 
render.yaml CHANGED
@@ -17,4 +17,4 @@ services:
17
  - key: LITE_MODE
18
  value: "true"
19
  - key: LLM_MODEL
20
- value: "openai/gpt-oss-20b"
 
17
  - key: LITE_MODE
18
  value: "true"
19
  - key: LLM_MODEL
20
+ value: "llama3-8b-8192"