Spaces:
Sleeping
Sleeping
Update app.py
Browse files
app.py
CHANGED
|
@@ -39,50 +39,50 @@ def get_active_llm_provider():
|
|
| 39 |
return None
|
| 40 |
|
| 41 |
|
|
|
|
|
|
|
|
|
|
|
|
|
| 42 |
def get_llm_summary(prompt: str, context: str = "") -> str:
|
| 43 |
"""
|
| 44 |
-
Generate an expert summary using the
|
| 45 |
-
|
| 46 |
-
Priority:
|
| 47 |
-
1) AI/ML API (OpenAI-compatible endpoint)
|
| 48 |
-
- Required secrets/env:
|
| 49 |
-
AI_ML_API_KEY (required)
|
| 50 |
-
AI_ML_API_BASE (optional, default https://api.openai.com/v1)
|
| 51 |
-
AI_ML_MODEL (optional, default gpt-5)
|
| 52 |
-
2) Groq API (fallback)
|
| 53 |
-
- GROQ_API_KEY required
|
| 54 |
"""
|
| 55 |
-
provider = get_active_llm_provider()
|
| 56 |
full_prompt = f"{context}\n\n{prompt}" if context else prompt
|
| 57 |
|
| 58 |
-
|
| 59 |
-
|
| 60 |
-
|
| 61 |
-
"
|
| 62 |
-
"
|
| 63 |
-
)
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 64 |
}
|
| 65 |
|
| 66 |
-
|
| 67 |
-
|
| 68 |
-
|
| 69 |
-
|
| 70 |
-
|
| 71 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 72 |
|
| 73 |
-
client = OpenAI(api_key=api_key, base_url=base_url)
|
| 74 |
-
resp = client.chat.completions.create(
|
| 75 |
-
model=model,
|
| 76 |
-
messages=[system_msg, {"role": "user", "content": full_prompt}],
|
| 77 |
-
temperature=0.7,
|
| 78 |
-
top_p=0.9,
|
| 79 |
-
max_tokens=1200,
|
| 80 |
-
presence_penalty=0.1,
|
| 81 |
-
frequency_penalty=0.1,
|
| 82 |
-
)
|
| 83 |
-
return resp.choices[0].message.content
|
| 84 |
-
except Exception as e:
|
| 85 |
-
return f"AI Analysis Error (AI/ML API): {e}"
|
| 86 |
|
| 87 |
elif provider == "groq":
|
| 88 |
try:
|
|
|
|
| 39 |
return None
|
| 40 |
|
| 41 |
|
| 42 |
+
# =============================
# 🔐 AIML API Integration
# =============================

def get_llm_summary(prompt: str, context: str = "") -> str:
    """
    Generate an expert summary using the AIML API.

    Uses the AIML_API_KEY secret (Hugging Face Spaces) and the AIML API
    ``/v1/responses`` endpoint.

    Args:
        prompt: The instruction / question sent to the model.
        context: Optional background text; when non-empty it is prepended
            to ``prompt`` separated by a blank line.

    Returns:
        The model's text output on success, or a human-readable error
        message string on any failure (missing key, non-200 response,
        request exception). This function never raises, so callers can
        always display the returned string directly.
    """
    full_prompt = f"{context}\n\n{prompt}" if context else prompt

    api_key = os.getenv("AIML_API_KEY")  # Hugging Face Spaces secret
    if not api_key:
        # Fail soft with setup instructions instead of crashing the app.
        return (
            "AI Analysis unavailable — AIML_API_KEY not configured.\n"
            "Go to Settings → Secrets → Create secret 'AIML_API_KEY' with your AIML API key."
        )

    url = "https://api.aimlapi.com/v1/responses"
    headers = {
        "Content-Type": "application/json",
        "Authorization": f"Bearer {api_key}",
    }
    payload = {
        "model": "openai/gpt-5-2025-08-07",
        "input": full_prompt,
    }

    try:
        # timeout keeps the UI responsive if the API endpoint hangs
        response = requests.post(url, headers=headers, json=payload, timeout=30)
        if response.status_code == 200:
            data = response.json()
            # AIML API usually returns: data['output'] or data['responses'][0]['text']
            if "output" in data:
                return data["output"]
            elif "responses" in data and len(data["responses"]) > 0:
                return data["responses"][0].get("text", "No text returned")
            else:
                return "No valid response returned by AIML API."
        else:
            return f"AI Analysis Error (AIML API {response.status_code}): {response.text}"
    except Exception as e:
        # Best-effort UX: surface network/parse failures as text, not a traceback.
        return f"AI Analysis Request Failed: {e}"
|
| 85 |
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
| 86 |
|
| 87 |
elif provider == "groq":
|
| 88 |
try:
|