# NOTE: "Spaces: / Sleeping / Sleeping" below-the-title text was Hugging Face
# Spaces page-status residue picked up when this file was copied from the web
# UI — it is not code and is preserved only as this comment.
import os

import requests

# Hugging Face Inference API endpoint for Mistral-7B-Instruct v0.2.
HF_MODEL_URL = "https://api-inference.huggingface.co/models/mistralai/Mistral-7B-Instruct-v0.2"
# API token read once at import time; may be None when the variable is unset
# (ask_llm checks this and returns an error string instead of raising).
HF_TOKEN = os.environ.get("HF_TOKEN")
# NOTE(review): if HF_TOKEN is None this header reads "Bearer None"; the guard
# in ask_llm prevents a request from ever being sent in that case.
HEADERS = {"Authorization": f"Bearer {HF_TOKEN}"}
def ask_llm(prompt: str, max_tokens: int = 300) -> str:
    """Query the Hugging Face Inference API and return the generated text.

    Args:
        prompt: The prompt/instruction sent to the model.
        max_tokens: Maximum number of new tokens to generate (default 300).

    Returns:
        The model's generated text on success; otherwise a human-readable
        string starting with "Error:" — this function never raises to its
        callers.
    """
    # Fail fast with the documented error string when no token is configured.
    if not HF_TOKEN:
        return "Error: HF_TOKEN environment variable not set."
    payload = {
        "inputs": prompt,
        "parameters": {"max_new_tokens": max_tokens},
        # Block until the model is loaded instead of getting a 503 back.
        "options": {"wait_for_model": True},
    }
    try:
        response = requests.post(HF_MODEL_URL, headers=HEADERS, json=payload, timeout=120)
        response.raise_for_status()
        result = response.json()
    except ValueError as e:
        # response.json() raises ValueError (JSONDecodeError) on a non-JSON
        # body; previously this escaped the function despite its string-only
        # error contract.
        return f"Error querying LLM: {e}"
    except requests.exceptions.RequestException as e:
        return f"Error querying LLM: {e}"
    # Success shape is [{"generated_text": "..."}]. Guard against an empty
    # list or a non-dict first entry before indexing — the original code
    # crashed with IndexError on [] because it checked only isinstance(list).
    if (
        isinstance(result, list)
        and result
        and isinstance(result[0], dict)
        and "generated_text" in result[0]
    ):
        return result[0]["generated_text"]
    return str(result)