Spaces:
Sleeping
Sleeping
| import gradio as gr | |
| import requests | |
| import json | |
# Base URL of the local Ollama server (default Ollama port 11434).
OLLAMA_URL = "http://localhost:11434"
def _history_to_messages(history):
    """Normalize Gradio chat history into Ollama's messages list.

    Accepts either of Gradio's history shapes and returns a new list of
    ``{"role", "content"}`` dicts:

    * tuple pairs ``(user_text, assistant_text)`` (classic format), or
    * role/content dicts (Gradio ``type="messages"`` format).
    """
    messages = []
    for turn in history:
        if isinstance(turn, dict):
            # Already in role/content shape; copy only the fields Ollama needs.
            messages.append({"role": turn["role"], "content": turn["content"]})
        else:
            human, assistant = turn
            messages.append({"role": "user", "content": human})
            # Assistant slot may be None/empty for an in-flight turn.
            if assistant:
                messages.append({"role": "assistant", "content": assistant})
    return messages


def chat(message, history):
    """Send *message* (plus prior *history*) to Ollama and return the reply.

    Args:
        message: The user's new message text.
        history: Prior conversation turns in either Gradio history format
            (tuple pairs or role/content dicts).

    Returns:
        The assistant's reply text on success, otherwise a human-readable
        error string (this function never raises).
    """
    try:
        # Build the full conversation for Ollama's chat endpoint.
        messages = _history_to_messages(history)
        messages.append({"role": "user", "content": message})

        # Call Ollama's chat API synchronously (no streaming).
        response = requests.post(
            f"{OLLAMA_URL}/api/chat",
            json={
                "model": "johnli-persona",
                "messages": messages,
                "stream": False,
            },
            timeout=120,  # first generation can be slow while the model warms up
        )
        if response.status_code == 200:
            return response.json()["message"]["content"]
        return f"Error: {response.status_code} - {response.text}"
    except requests.exceptions.ConnectionError:
        # Server not up yet (common right after a Space cold-start).
        return "⏳ Model is still loading... Please wait 30 seconds and try again."
    except Exception as e:
        return f"Error: {str(e)}"
def check_status():
    """Check whether Ollama is reachable and the persona model is loaded.

    Returns:
        A human-readable status string; this function never raises.
    """
    try:
        response = requests.get(f"{OLLAMA_URL}/api/tags", timeout=10)
        if response.status_code == 200:
            models = response.json().get("models", [])
            # Defensive .get(): don't crash the status check on a malformed entry.
            model_names = [m.get("name", "") for m in models]
            if any("johnli-persona" in name for name in model_names):
                return "✅ Model 'johnli-persona' is loaded and ready!"
            return f"⏳ Ollama running, but model not loaded yet. Available: {model_names}"
        return f"⚠️ Ollama responded with: {response.status_code}"
    except Exception:
        # Narrowed from a bare `except:` so KeyboardInterrupt/SystemExit
        # still propagate; any request/parse failure means "not ready".
        return "❌ Ollama is not running yet. Please wait..."
# Create Gradio Interface
with gr.Blocks(title="JohnLi Persona Chatbot") as demo:
    gr.Markdown("# 🤖 JohnLi Persona Chatbot")
    gr.Markdown("Chat with John Lenard Libertad's AI persona!")

    # Manual status row: lets the user poll Ollama before starting to chat.
    with gr.Row():
        status_btn = gr.Button("🔄 Check Status")
        status_output = gr.Textbox(label="Status", interactive=False)
    status_btn.click(fn=check_status, outputs=status_output)

    # Main chat widget; `chat` receives (message, history).
    gr.ChatInterface(
        fn=chat,
        examples=[
            "Hi! Who are you?",
            "What's your philosophy in life?",
            "Tell me about your projects",
            "What technologies do you work with?",
        ],
        title="",  # heading already rendered via the Markdown above
    )

# Bind to all interfaces on port 7860 (Hugging Face Spaces convention).
demo.launch(server_name="0.0.0.0", server_port=7860)