# cpu/app.py — author: Nhughes09 (commit 204d759)
# "Final robust chatbot with dynamic connection check and model selector"
# app.py - Robust Local Ollama Chatbot
# Features: Dynamic Connection, Model Selection, Robust History
import gradio as gr
import requests
import logging
import sys
import shutil
from datetime import datetime
# Logging: everything at INFO+ goes to stdout so this log interleaves with
# Gradio's own console output.
logging.basicConfig(
    level=logging.INFO,
    format="%(asctime)s | %(levelname)-8s | %(message)s",
    handlers=[logging.StreamHandler(sys.stdout)]
)
logger = logging.getLogger("CHATBOT")

# Base URL of the local Ollama HTTP API (Ollama's default port).
OLLAMA_URL = "http://localhost:11434"

# Global state shared between the connection check and the chat handler.
# current_model: model name currently selected for chat ("" until first check).
# available_models: last model list fetched from the server.
current_model = ""
available_models = []
def get_ollama_models():
    """Return the list of model names served by the local Ollama instance.

    Queries GET /api/tags with a short timeout so a dead server does not
    stall the UI.

    Returns:
        list[str]: model names, or an empty list on connection failure,
        timeout, non-200 status, or a malformed response payload.
    """
    try:
        r = requests.get(f"{OLLAMA_URL}/api/tags", timeout=2)
        if r.status_code == 200:
            return [m["name"] for m in r.json().get("models", [])]
    except (requests.RequestException, ValueError, KeyError, TypeError) as e:
        # Narrowed from a bare `except:` (which also swallowed
        # KeyboardInterrupt/SystemExit). ValueError covers invalid JSON,
        # KeyError/TypeError cover an unexpected payload shape.
        logger.debug("Could not fetch Ollama models: %s", e)
    return []
def check_connection():
    """Check Ollama availability and refresh the model dropdown.

    Returns:
        tuple[str, dict]: a Markdown status string and a ``gr.update`` for
        the model dropdown (choices + selected value).
    """
    global current_model, available_models
    # Missing binary means Ollama was never installed at all.
    if shutil.which("ollama") is None:
        return "❌ **Ollama not installed**", gr.update(choices=[], value=None)
    models = get_ollama_models()
    available_models = models
    if not models:
        # get_ollama_models() also returns [] when the server is unreachable,
        # so don't claim the server is running (the old message did).
        return ("⚠️ Ollama installed but **no models found** "
                "(is `ollama serve` running?)"), gr.update(choices=[], value=None)
    # Default selection: prefer llama3.2, then deepseek, then first available.
    if not current_model or current_model not in models:
        if "llama3.2:3b" in models:
            current_model = "llama3.2:3b"
        elif "deepseek-coder:6.7b-instruct-q6_K" in models:
            current_model = "deepseek-coder:6.7b-instruct-q6_K"
        else:
            current_model = models[0]
    # Plain string: the original was an f-string with no placeholders and a
    # mojibake check mark ("βœ…").
    return "✅ Connected", gr.update(choices=models, value=current_model)
def chat_fn(message, history, model_name):
    """Generate a reply from the selected Ollama model.

    Args:
        message: the new user message.
        history: prior turns. Supports Gradio "messages" format
            (dicts with "role"/"content") and, for backward compatibility,
            the legacy pair format ``(user_msg, assistant_msg)``.
        model_name: Ollama model to query; falsy means none selected.

    Returns:
        str: the assistant's reply, or a human-readable error string
        (never raises — errors are surfaced in the chat window).
    """
    if not model_name:
        return "Error: No model selected. Please check connection."

    # Flatten the conversation into a single prompt for /api/generate.
    prompt = "You are a helpful AI assistant.\n\n"
    for item in history:
        if isinstance(item, dict):
            # Gradio "messages" format.
            role = item.get("role", "")
            content = item.get("content", "")
            if role == "user":
                prompt += f"User: {content}\n"
            elif role == "assistant":
                prompt += f"Assistant: {content}\n"
        elif isinstance(item, (list, tuple)) and len(item) == 2:
            # Legacy tuple format: (user_message, assistant_reply).
            user_msg, bot_msg = item
            if user_msg:
                prompt += f"User: {user_msg}\n"
            if bot_msg:
                prompt += f"Assistant: {bot_msg}\n"
    prompt += f"User: {message}\nAssistant:"

    try:
        # stream=False: wait for the full completion in one response body.
        r = requests.post(
            f"{OLLAMA_URL}/api/generate",
            json={"model": model_name, "prompt": prompt, "stream": False},
            timeout=120
        )
        if r.status_code == 200:
            return r.json().get("response", "").strip()
        return f"Error: Ollama returned {r.status_code}"
    except Exception as e:
        # Boundary handler: report any failure as chat text rather than crash.
        return f"Error: {e}"
# UI layout and event wiring. Mojibake emoji in the original headings/buttons
# ("πŸ€–", "πŸ”„", "πŸ› οΈ") are restored to their intended characters.
with gr.Blocks(title="Local Chatbot") as demo:
    gr.Markdown("# 🤖 Local AI Chatbot")

    with gr.Row():
        status_display = gr.Markdown("**Checking connection...**")
        refresh_btn = gr.Button("🔄 Check Connection", size="sm")

    with gr.Row():
        model_dropdown = gr.Dropdown(
            label="Select Model",
            choices=[],
            value=None,
            interactive=True,
        )

    # ChatInterface forwards the dropdown's value as chat_fn's third argument.
    chat_interface = gr.ChatInterface(
        fn=chat_fn,
        additional_inputs=[model_dropdown],
    )

    with gr.Accordion("🛠️ Setup Instructions", open=False):
        gr.Markdown("""
        1. **Install Ollama:** [ollama.com](https://ollama.com)
        2. **Start Server:** `ollama serve`
        3. **Download Model:** `ollama pull llama3.2:3b`
        """)

    # Wiring: run the connection check both on demand and on page load so the
    # dropdown is populated as soon as the app opens.
    def update_all():
        status, model_update = check_connection()
        return status, model_update

    refresh_btn.click(update_all, outputs=[status_display, model_dropdown])
    demo.load(update_all, outputs=[status_display, model_dropdown])
# Entry point: start the Gradio server (blocks until shut down).
if __name__ == "__main__":
    demo.launch()