import asyncio
import logging

import httpx

from config import CEREBRAS_API_KEY, GROQ_API_KEY, TIMEOUT_SECONDS, MAX_RETRIES, SYSTEM_PROMPT
|
|
async def fetch_completion(url: str, headers: dict, data: dict) -> str:
    """POST a chat-completion request to *url* and return the reply text.

    Sends *data* as the JSON body with the given *headers*, raises
    ``httpx.HTTPStatusError`` on a non-2xx response, and extracts the
    first choice's message content from the OpenAI-style response.
    """
    async with httpx.AsyncClient(timeout=TIMEOUT_SECONDS) as client:
        resp = await client.post(url, headers=headers, json=data)
        resp.raise_for_status()
        payload = resp.json()
        return payload["choices"][0]["message"]["content"]
|
|
async def call_llm(task: str) -> str:
    """Send *task* to the first available LLM provider and return its reply.

    Providers are tried in priority order (Cerebras, then Groq); each
    configured provider gets up to ``MAX_RETRIES`` attempts (at least one)
    before falling through to the next.  Failures are logged rather than
    silently swallowed.  Never raises: if every provider fails or no API
    key is configured, an error string is returned instead.
    """
    logger = logging.getLogger(__name__)
    messages = [
        {"role": "system", "content": SYSTEM_PROMPT},
        {"role": "user", "content": task},
    ]

    # Providers in priority order: (name, endpoint, api_key, model).
    providers = [
        (
            "cerebras",
            "https://api.cerebras.ai/v1/chat/completions",
            CEREBRAS_API_KEY,
            "llama-3.3-70b",
        ),
        (
            "groq",
            "https://api.groq.com/openai/v1/chat/completions",
            GROQ_API_KEY,
            "llama-3.3-70b-versatile",
        ),
    ]

    # Floor of 1 so a misconfigured MAX_RETRIES of 0 still makes one attempt,
    # matching the original single-attempt behavior.
    attempts = max(MAX_RETRIES, 1)

    for name, url, key, model in providers:
        if not key:
            continue
        for attempt in range(1, attempts + 1):
            try:
                return await fetch_completion(
                    url,
                    {"Authorization": f"Bearer {key}", "Content-Type": "application/json"},
                    {"model": model, "messages": messages},
                )
            except Exception as exc:
                # Best-effort fallback by design: log the failure and move on
                # to the next attempt/provider instead of raising to callers.
                logger.warning(
                    "%s attempt %d/%d failed: %s", name, attempt, attempts, exc
                )

    return "Error: All available models failed or no API keys provided."