# chat-bot / app.py
# Author: surahj
# Simplify app for HF Spaces deployment - remove llama-cpp-python dependency
# Commit: e9db321
#!/usr/bin/env python3
"""
Main entry point for Hugging Face Spaces deployment
"""
import gradio as gr
import os
import sys
from pathlib import Path
# Add the app directory to the Python path
sys.path.append(str(Path(__file__).parent / "app"))
def create_simple_chat_interface():
    """Build and return the Gradio Blocks chat UI for the HF Spaces demo.

    This is a mock interface: replies are picked at random from a fixed
    list of canned responses rather than generated by an LLM (the model
    settings widgets are displayed but not wired to the responder yet).

    Returns:
        gr.Blocks: the assembled interface, not yet launched.
    """
    # Hoisted to function scope: previously imported mid-body inside the
    # response path on every call.
    import random

    def send_message(message, history):
        """Append the user message and a canned random reply to history.

        Args:
            message: raw text from the input box.
            history: Gradio Chatbot history, a list of [user, assistant] pairs.

        Returns:
            (updated_history, ""): the empty string clears the input box.
        """
        # Ignore blank / whitespace-only submissions.
        if not message.strip():
            return history, ""

        # Add the user turn with a placeholder for the assistant reply.
        history.append([message, None])

        # Mock response generation — replace with a real model call later.
        responses = [
            "Hello! I'm a helpful AI assistant. How can I help you today?",
            "That's an interesting question! Let me think about that.",
            "I'd be happy to help you with that.",
            "Thanks for your message! I'm here to assist you.",
            "Great question! Here's what I can tell you about that.",
        ]
        history[-1][1] = random.choice(responses)
        return history, ""

    def clear_chat():
        """Reset the chatbot display to an empty conversation."""
        return []

    # Build the interface layout.
    with gr.Blocks(
        css="""
        .chat-container {
            max-height: 600px;
            overflow-y: auto;
            border: 1px solid #ddd;
            border-radius: 10px;
            padding: 20px;
            background: white;
        }
        .user-message {
            background-color: #007bff;
            color: white;
            padding: 10px 15px;
            border-radius: 18px;
            margin: 10px 0;
            max-width: 80%;
            margin-left: auto;
        }
        .assistant-message {
            background-color: #f8f9fa;
            color: #333;
            padding: 10px 15px;
            border-radius: 18px;
            margin: 10px 0;
            max-width: 80%;
            margin-right: auto;
        }
        """,
        title="LLM Chat Interface",
    ) as interface:
        gr.Markdown("# 🤖 LLM Chat Interface")
        gr.Markdown("Chat with your local LLM model using a beautiful web interface.")

        # Chat display
        chatbot = gr.Chatbot(
            value=[],
            label="Chat History",
            height=400,
            elem_classes=["chat-container"],
        )

        # Input area
        with gr.Row():
            message_input = gr.Textbox(
                placeholder="Type your message here...",
                label="Message",
                lines=3,
                scale=4,
            )
            # scale must be an integer in Gradio (0.3 warns/errors in v4+).
            send_btn = gr.Button("Send", variant="primary", scale=1)

        # Clear button
        clear_btn = gr.Button("Clear Chat", variant="secondary")

        # Model settings section — currently display-only, not connected
        # to send_message.
        with gr.Row():
            with gr.Column(scale=2):
                gr.Markdown("### ⚙️ Model Settings")
                temperature_slider = gr.Slider(
                    minimum=0.0,
                    maximum=2.0,
                    value=0.7,
                    step=0.1,
                    label="Temperature",
                    info="Controls randomness (0 = deterministic, 2 = very random)",
                )
                top_p_slider = gr.Slider(
                    minimum=0.0,
                    maximum=1.0,
                    value=0.9,
                    step=0.1,
                    label="Top-p",
                    info="Controls diversity via nucleus sampling",
                )
                max_tokens_slider = gr.Slider(
                    minimum=50,
                    maximum=2048,
                    value=512,
                    step=50,
                    label="Max Tokens",
                    info="Maximum number of tokens to generate",
                )

        # System message
        system_message = gr.Textbox(
            placeholder="You are a helpful AI assistant.",
            label="System Message",
            lines=3,
            info="Optional system message to set the assistant's behavior",
        )

        # Model status
        model_status = gr.Markdown(
            "**Model Status:** ✅ Ready (Mock Mode)\n"
            "**Model Type:** Simple Chat Interface\n"
            "**Note:** This is a demo version. Add your model files to enable full LLM functionality."
        )

        # Event handlers: button click and Enter-to-submit share one handler.
        send_btn.click(
            fn=send_message,
            inputs=[message_input, chatbot],
            outputs=[chatbot, message_input],
        )
        message_input.submit(
            fn=send_message,
            inputs=[message_input, chatbot],
            outputs=[chatbot, message_input],
        )
        clear_btn.click(fn=clear_chat, outputs=[chatbot])

    return interface
def main():
    """Build the chat interface and launch the Gradio server.

    Host/port are left to Gradio's defaults because HF Spaces configures
    them automatically. Exits with status 1 on launch failure.
    """
    try:
        interface = create_simple_chat_interface()
        # HF Spaces handles public sharing, so share=False here.
        interface.launch(share=False, show_error=True, quiet=False)
    except Exception as e:
        # Report to stderr (not stdout) so the failure surfaces in the
        # Space's error logs alongside the non-zero exit status.
        print(f"Error launching interface: {e}", file=sys.stderr)
        sys.exit(1)
# Standard script entry point: launch only when executed directly,
# not when this module is imported.
if __name__ == "__main__":
    main()