anasfsd123 committed on
Commit
5d9de73
·
verified ·
1 Parent(s): 4e06647

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +16 -24
app.py CHANGED
@@ -19,7 +19,11 @@ warnings.filterwarnings('ignore')
19
  def get_env_or_secret(key_name: str, default: str = None):
20
  """Helper to read from Streamlit secrets first, then env vars."""
21
  try:
22
- return st.secrets.get(key_name, os.getenv(key_name, default))
 
 
 
 
23
  except Exception:
24
  return os.getenv(key_name, default)
25
 
@@ -42,25 +46,17 @@ def get_active_llm_provider():
42
  def get_llm_summary(prompt: str, context: str = "") -> str:
43
  """
44
  Robust LLM summary using AIML /responses endpoint.
45
- Changes:
46
- - Wraps the prompt inside `input: {"text": ...}` as some endpoints expect.
47
- - Ensures `response_format` is set to force textual output.
48
- - Keeps the previous temperature-fallback (remove if unsupported).
49
- - Retries once with a larger token budget (without temperature) if no text returned.
50
  """
51
  # Build final prompt safely
52
  if context:
53
- full_prompt = f"{context}
54
-
55
- {prompt}"
56
  else:
57
  full_prompt = prompt
58
 
59
- api_key = get_env_or_secret("AI_ML_API_KEY") or os.getenv("AI_ML_API_KEY")
60
  if not api_key:
61
  return (
62
- "AI Analysis unavailable — AI_ML_API_KEY not configured.
63
- "
64
  "Go to Settings → Secrets → Create secret 'AI_ML_API_KEY' with your AI/ML API key."
65
  )
66
 
@@ -78,13 +74,12 @@ def get_llm_summary(prompt: str, context: str = "") -> str:
78
  except Exception as e:
79
  return None, f"AI Analysis Request Failed (parsing): {e}"
80
 
81
- # initial payload (wrap input in dict and force text response_format)
82
  base_payload = {
83
- "model": "openai/gpt-5-2025-08-07",
84
- "input": {"text": full_prompt},
85
  "max_output_tokens": 1024,
86
  "temperature": 0.0,
87
- "reasoning_effort": "low",
88
  "response_format": {"type": "text"}
89
  }
90
 
@@ -145,19 +140,16 @@ def get_llm_summary(prompt: str, context: str = "") -> str:
145
  if isinstance(c, dict) and c.get("text"):
146
  texts.append(c["text"].strip())
147
 
148
- return "
149
-
150
- ".join(texts).strip()
151
 
152
  out_text = extract_text_from_response(data)
153
  # If we received only reasoning without text, try an explicit retry that forces text
154
  if not out_text:
155
- # Second attempt: ensure response_format present and input wrapped (no temperature)
156
  retry_payload = {
157
- "model": base_payload["model"],
158
- "input": {"text": full_prompt},
159
  "max_output_tokens": 2048,
160
- "reasoning_effort": "low",
161
  "response_format": {"type": "text"}
162
  }
163
  data2, err2 = call_api(retry_payload)
@@ -731,4 +723,4 @@ def main():
731
 
732
 
733
  if __name__ == "__main__":
734
- main()
 
19
  def get_env_or_secret(key_name: str, default: str = None):
20
  """Helper to read from Streamlit secrets first, then env vars."""
21
  try:
22
+ # Try Streamlit secrets first
23
+ if hasattr(st, 'secrets') and key_name in st.secrets:
24
+ return st.secrets[key_name]
25
+ # Fall back to environment variables
26
+ return os.getenv(key_name, default)
27
  except Exception:
28
  return os.getenv(key_name, default)
29
 
 
46
  def get_llm_summary(prompt: str, context: str = "") -> str:
47
  """
48
  Robust LLM summary using AIML /responses endpoint.
 
 
 
 
 
49
  """
50
  # Build final prompt safely
51
  if context:
52
+ full_prompt = f"{context}\nUser Query: {prompt}"
 
 
53
  else:
54
  full_prompt = prompt
55
 
56
+ api_key = get_env_or_secret("AI_ML_API_KEY")
57
  if not api_key:
58
  return (
59
+ "AI Analysis unavailable — AI_ML_API_KEY not configured.\n"
 
60
  "Go to Settings → Secrets → Create secret 'AI_ML_API_KEY' with your AI/ML API key."
61
  )
62
 
 
74
  except Exception as e:
75
  return None, f"AI Analysis Request Failed (parsing): {e}"
76
 
77
+ # Fixed payload with correct model name and input format
78
  base_payload = {
79
+ "model": "gpt-4o", # Changed from "openai/gpt-5-2025-08-07" to "gpt-4o"
80
+ "input": full_prompt, # Changed from {"text": full_prompt} to just full_prompt
81
  "max_output_tokens": 1024,
82
  "temperature": 0.0,
 
83
  "response_format": {"type": "text"}
84
  }
85
 
 
140
  if isinstance(c, dict) and c.get("text"):
141
  texts.append(c["text"].strip())
142
 
143
+ return "\n".join(texts).strip()
 
 
144
 
145
  out_text = extract_text_from_response(data)
146
  # If we received only reasoning without text, try an explicit retry that forces text
147
  if not out_text:
148
+ # Second attempt: try with gpt-4o-mini as fallback
149
  retry_payload = {
150
+ "model": "gpt-4o-mini", # Use mini version as fallback
151
+ "input": full_prompt,
152
  "max_output_tokens": 2048,
 
153
  "response_format": {"type": "text"}
154
  }
155
  data2, err2 = call_api(retry_payload)
 
723
 
724
 
725
  if __name__ == "__main__":
726
+ main()