rdune71 committed on
Commit
92eb899
·
1 Parent(s): 4a30717
Files changed (2) hide show
  1. app.py +7 -8
  2. modules/analyzer.py +44 -3
app.py CHANGED
@@ -18,7 +18,7 @@ class ResearchOrchestrator:
18
  self.analyzer = analyzer
19
  self.citation_manager = citation_manager
20
  self.formatter = formatter
21
-
22
  def run(self, query):
23
  """Execute the research pipeline with streaming updates"""
24
  try:
@@ -41,7 +41,7 @@ class ResearchOrchestrator:
41
  logging.info(f"Retrieved {len(search_results)} results")
42
 
43
  # Step 3: Analyze content
44
- yield "🧠 Analyzing search results..."
45
  analysis = self.analyzer.analyze(query, search_results)
46
  logging.info("Analysis completed")
47
 
@@ -81,16 +81,16 @@ def initialize_modules():
81
  try:
82
  if not CONFIG["tavily_api_key"]:
83
  raise ValueError("TAVILY_API_KEY environment variable is not set")
84
-
85
  if not CONFIG["hf_api_key"]:
86
  raise ValueError("HF_TOKEN environment variable is not set")
87
-
88
  input_handler = InputHandler()
89
  retriever = Retriever(api_key=CONFIG["tavily_api_key"])
90
  analyzer = Analyzer(base_url=CONFIG["hf_api_base"], api_key=CONFIG["hf_api_key"])
91
  citation_manager = CitationManager()
92
  formatter = OutputFormatter()
93
-
94
  return ResearchOrchestrator(
95
  input_handler,
96
  retriever,
@@ -124,7 +124,7 @@ with gr.Blocks(title="Research Assistant") as demo:
124
  lines=3
125
  )
126
  submit_btn = gr.Button("Research", variant="primary")
127
-
128
  with gr.Column():
129
  output = gr.Markdown(label="Analysis Results")
130
 
@@ -150,5 +150,4 @@ with gr.Blocks(title="Research Assistant") as demo:
150
  )
151
 
152
  if __name__ == "__main__":
153
- demo.launch()
154
-
 
18
  self.analyzer = analyzer
19
  self.citation_manager = citation_manager
20
  self.formatter = formatter
21
+
22
  def run(self, query):
23
  """Execute the research pipeline with streaming updates"""
24
  try:
 
41
  logging.info(f"Retrieved {len(search_results)} results")
42
 
43
  # Step 3: Analyze content
44
+ yield "🧠 Analyzing search results...\n\n⏳ The AI model may be initializing. This could take a few minutes if it's the first request..."
45
  analysis = self.analyzer.analyze(query, search_results)
46
  logging.info("Analysis completed")
47
 
 
81
  try:
82
  if not CONFIG["tavily_api_key"]:
83
  raise ValueError("TAVILY_API_KEY environment variable is not set")
84
+
85
  if not CONFIG["hf_api_key"]:
86
  raise ValueError("HF_TOKEN environment variable is not set")
87
+
88
  input_handler = InputHandler()
89
  retriever = Retriever(api_key=CONFIG["tavily_api_key"])
90
  analyzer = Analyzer(base_url=CONFIG["hf_api_base"], api_key=CONFIG["hf_api_key"])
91
  citation_manager = CitationManager()
92
  formatter = OutputFormatter()
93
+
94
  return ResearchOrchestrator(
95
  input_handler,
96
  retriever,
 
124
  lines=3
125
  )
126
  submit_btn = gr.Button("Research", variant="primary")
127
+
128
  with gr.Column():
129
  output = gr.Markdown(label="Analysis Results")
130
 
 
150
  )
151
 
152
  if __name__ == "__main__":
153
+ demo.launch()
 
modules/analyzer.py CHANGED
@@ -1,5 +1,8 @@
 
1
  from openai import OpenAI
2
- import json
 
 
3
 
4
  class Analyzer:
5
  def __init__(self, base_url, api_key):
@@ -7,7 +10,39 @@ class Analyzer:
7
  base_url=base_url,
8
  api_key=api_key
9
  )
 
 
10
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
11
  def analyze(self, query, search_results):
12
  """
13
  Analyze search results using the custom LLM
@@ -37,6 +72,10 @@ class Analyzer:
37
  """
38
 
39
  try:
 
 
 
 
40
  response = self.client.chat.completions.create(
41
  model="DavidAU/OpenAi-GPT-oss-20b-abliterated-uncensored-NEO-Imatrix-gguf",
42
  messages=[
@@ -51,5 +90,7 @@ class Analyzer:
51
  return response.choices[0].message.content
52
 
53
  except Exception as e:
54
- return f"Analysis failed: {str(e)}"
55
-
 
 
 
1
+ # modules/analyzer.py
2
  from openai import OpenAI
3
+ import requests
4
+ import time
5
+ import logging
6
 
7
  class Analyzer:
8
  def __init__(self, base_url, api_key):
 
10
  base_url=base_url,
11
  api_key=api_key
12
  )
13
+ self.health_check_url = base_url.rstrip('/') + "/health"
14
+ self.headers = {"Authorization": f"Bearer {api_key}"}
15
 
16
def wait_for_server(self, timeout=300, interval=10):
    """Poll the health endpoint until the server responds with HTTP 200.

    Parameters:
        timeout (int): Maximum number of seconds to keep polling.
        interval (int): Seconds to sleep between consecutive checks.

    Returns:
        bool: True once the server answers with status 200, False if the
        timeout elapses first (including timeout <= 0, which skips polling
        entirely).
    """
    logging.info("⏳ Waiting for the server to initialize...")
    # time.monotonic() is immune to system clock adjustments, unlike
    # time.time(), so the deadline cannot jump backward or forward.
    deadline = time.monotonic() + timeout

    while time.monotonic() < deadline:
        try:
            response = requests.get(
                self.health_check_url, headers=self.headers, timeout=10
            )
            if response.status_code == 200:
                logging.info("✅ Server is ready!")
                return True
            # Any non-200 status is treated as "still warming up".
            # Lazy %-args keep formatting off the hot path per logging docs.
            logging.info(
                "🌐 Server responded with status: %s — still initializing...",
                response.status_code,
            )
        except requests.exceptions.RequestException:
            # Connection refused / DNS failure / request timeout — retry.
            logging.info("🔴 Still unreachable — retrying...")

        time.sleep(interval)

    logging.warning("⏰ Timeout reached. Server didn't initialize in time.")
    return False
45
+
46
  def analyze(self, query, search_results):
47
  """
48
  Analyze search results using the custom LLM
 
72
  """
73
 
74
  try:
75
+ # First check if server is ready
76
+ if not self.wait_for_server(timeout=180): # 3 minutes timeout
77
+ return "⚠️ The AI model server is still initializing. Please try again in a few minutes."
78
+
79
  response = self.client.chat.completions.create(
80
  model="DavidAU/OpenAi-GPT-oss-20b-abliterated-uncensored-NEO-Imatrix-gguf",
81
  messages=[
 
90
  return response.choices[0].message.content
91
 
92
  except Exception as e:
93
+ error_msg = str(e)
94
+ if "503" in error_msg or "Service Unavailable" in error_msg:
95
+ return "⚠️ The AI model server is currently unavailable. It may be initializing. Please try again in a few minutes."
96
+ return f"Analysis failed: {str(e)}"