Spaces:
Running
Running
| import requests | |
| from src.api_clients import get_openrouter_models, openai_client | |
| import os | |
def generate_summary_from_openrouter(article, prompt, model):
    """Generate a summary of *article* via the OpenRouter chat-completions API.

    Args:
        article: The article text to summarize.
        prompt: The summarization instruction prepended to the article.
        model: OpenRouter model identifier (e.g. "openai/gpt-4o-mini").

    Returns:
        The model's reply text (str).

    Raises:
        requests.HTTPError: If OpenRouter returns a non-2xx status.
        requests.Timeout: If the request exceeds the timeout.
        KeyError: If the response JSON lacks the expected structure.
    """
    headers = {
        # API key is read from the environment; a missing key sends "Bearer None"
        # and surfaces as a 401 via raise_for_status below.
        "Authorization": f"Bearer {os.getenv('OPENROUTER_API_KEY')}",
        "X-Title": "BrainDrive Summary Gen",
    }
    payload = {
        "model": model,
        "messages": [
            {"role": "user", "content": f"{prompt}\n\n{article}"}
        ],
        "max_tokens": 1000,
    }
    res = requests.post(
        "https://openrouter.ai/api/v1/chat/completions",
        headers=headers,
        json=payload,
        timeout=60,  # prevent an indefinite hang on a stalled connection
    )
    # Fail loudly on HTTP errors instead of raising an opaque KeyError when
    # indexing into an error-body JSON below.
    res.raise_for_status()
    return res.json()["choices"][0]["message"]["content"]
def is_prompt_valid_for_summary(prompt):
    """Ask a small OpenAI model whether *prompt* strictly requests a summary.

    Args:
        prompt: The candidate summarization prompt to vet.

    Returns:
        True if the checker model's reply contains "yes" (case-insensitive),
        False otherwise.
    """
    check_prompt = f"You are a summarization prompt checker. Determine if the following prompt is strictly asking for a summary: '{prompt}'. Reply 'Yes' or 'No'."
    messages = [{"role": "user", "content": check_prompt}]
    # Note: Original had gpt-4.1-mini, but assuming typo; use gpt-4o-mini for compatibility
    response = openai_client.chat.completions.create(
        model="gpt-4o-mini",
        messages=messages,
        max_tokens=10,
    )
    verdict = response.choices[0].message.content
    return "yes" in verdict.strip().lower()