| import requests |
| import time |
| from utils.config import config |
|
|
def check_ollama_status():
    """
    Check whether Ollama is reachable and which model is loaded.

    Probes the configured remote (ngrok) URL first and falls back to the
    local default endpoint only when the remote yields no model name.

    Returns:
        dict: {
            "running": True/False,        # a server answered at either URL
            "model_loaded": "mistral-7b" or None,
            "ngrok_url": "https://f943b91f0a0c.ngrok-free.app/",
            "local_url": "http://localhost:11434/"
        }
    """
    ngrok_url = config.ollama_host
    local_url = "http://localhost:11434/"

    # ngrok's free tier serves an HTML interstitial to non-browser clients
    # unless this header is present.
    headers = {
        "ngrok-skip-browser-warning": "true",
        "User-Agent": "AI-Life-Coach-App",
    }

    def _probe_url(base_url, retries=3, delay=1):
        """Probe one Ollama endpoint with retry + exponential backoff.

        Returns:
            tuple: (reachable: bool, model_name: str or None).
            reachable is True when any successful HTTP response came back;
            model_name is the first model listed by /api/tags, or None.
        """
        for attempt in range(retries):
            try:
                response = requests.get(
                    f"{base_url}/api/tags", timeout=10, headers=headers
                )
                if response.status_code == 200:
                    models = response.json().get("models", [])
                    if models:
                        return True, models[0].get("name")
                    # Server answered but reports no models loaded;
                    # retrying will not change that, so stop here.
                    return True, None
                if response.status_code == 404:
                    # Endpoint may not expose /api/tags; a 200 on the root
                    # URL still proves the server is alive.
                    root = requests.get(base_url, timeout=10, headers=headers)
                    if root.status_code == 200:
                        return True, "unknown-model"
            except (requests.RequestException, ValueError):
                # Network/JSON failure only (ValueError covers older
                # requests versions whose .json() raises it directly).
                if attempt < retries - 1:
                    time.sleep(delay * (2 ** attempt))
        return False, None

    remote_up, remote_model = _probe_url(ngrok_url)
    local_up, local_model = False, None
    # Fall back to localhost only when the remote gave us no model name.
    if not remote_model:
        local_up, local_model = _probe_url(local_url)

    model_loaded = remote_model or local_model
    # Fix: "running" previously was bool(model_loaded), so a reachable
    # Ollama with an empty model list was reported as not running. It now
    # reflects reachability, matching the documented return contract.
    running = remote_up or local_up

    return {
        "running": running,
        "model_loaded": model_loaded,
        "ngrok_url": ngrok_url,
        "local_url": local_url,
    }
|
|