# Author: BikoRiko — uploaded with huggingface_hub (commit 2d5c442, verified)
"""
AI Chatbot powered by HuggingFace Model
A modern conversational AI built with Gradio 6 and transformers
"""
import gradio as gr
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import warnings
warnings.filterwarnings("ignore")
# Model configuration
MODEL_NAME = "microsoft/DialoGPT-medium"
def load_chatbot():
    """
    Load the conversational model and tokenizer from HuggingFace.

    Returns:
        A transformers "text-generation" pipeline configured for chat-style
        sampling (temperature / top-p) with left-side padding.

    Raises:
        RuntimeError: if the model or tokenizer cannot be downloaded/loaded;
            the original exception is chained as the cause.
    """
    try:
        # Left padding so generation continues from the end of the prompt.
        tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME, padding_side="left")
        # DialoGPT ships without a pad token; reuse EOS so padding works.
        if tokenizer.pad_token is None:
            tokenizer.pad_token = tokenizer.eos_token
        model = AutoModelForCausalLM.from_pretrained(MODEL_NAME)
        conversation_pipeline = pipeline(
            "text-generation",
            model=model,
            tokenizer=tokenizer,
            max_new_tokens=150,
            do_sample=True,
            temperature=0.7,
            top_p=0.95,
            pad_token_id=tokenizer.eos_token_id
        )
        return conversation_pipeline
    except Exception as e:
        # Chain the cause (`from e`) so download/IO failures stay diagnosable.
        raise RuntimeError(f"Failed to load model: {str(e)}") from e
# Process-wide pipeline handle, populated lazily on first request.
chat_pipeline = None


def get_chat_pipeline():
    """Return the shared chat pipeline, creating it on first use."""
    global chat_pipeline
    if chat_pipeline is not None:
        return chat_pipeline
    chat_pipeline = load_chatbot()
    return chat_pipeline
def format_prompt(message, history):
    """Serialize past turns plus the new user message into a model prompt.

    Every turn is wrapped in <|user|>/<|assistant|> role markers terminated
    by <|endoftext|>; the prompt ends with an open <|assistant|> marker so
    the model continues speaking as the assistant.
    """
    pieces = []
    # Replay the conversation so far, one marker-wrapped segment per turn.
    for user_turn, assistant_turn in history:
        pieces.append(f"<|user|>{user_turn}<|endoftext|>")
        pieces.append(f"<|assistant|>{assistant_turn}<|endoftext|>")
    # Current message, then the open assistant tag the model must complete.
    pieces.append(f"<|user|>{message}<|endoftext|>")
    pieces.append("<|assistant|>")
    return "".join(pieces)
def chatbot_fn(message, history, system_prompt, max_length, temperature, top_p):
    """
    Generate a response from the chatbot based on user input.

    Args:
        message: User's input message.
        history: List of previous (user, bot) message tuples.
        system_prompt: System prompt to set the bot's behavior (prepended
            with a <|system|> marker when non-empty).
        max_length: Maximum number of new tokens to generate.
        temperature: Randomness of generation (0 disables sampling).
        top_p: Top-p sampling parameter (0.0-1.0).

    Returns:
        Updated conversation history with the new (message, response) pair;
        on failure the error text is placed in the bot slot instead.
    """
    # Guard: blank input never reaches the model.
    if not message or not message.strip():
        return history + [("Empty message", "Please enter a message to chat!")]

    # FIX: the previous `pipeline.task_kwargs = {...}` assignment was dead
    # code — transformers pipelines read no such attribute, and the same
    # generation kwargs are passed per-call below. It also shadowed the
    # imported `transformers.pipeline` factory, so it has been removed and
    # the local renamed to `pipe`.
    pipe = get_chat_pipeline()

    try:
        # Format the prompt with history, then prepend system context.
        full_prompt = format_prompt(message, history)
        if system_prompt:
            full_prompt = f"<|system|>{system_prompt}<|endoftext|>\n{full_prompt}"

        response = pipe(
            full_prompt,
            max_new_tokens=max_length,
            do_sample=temperature > 0,
            # Sampling params must stay valid even when sampling is off.
            temperature=temperature if temperature > 0 else 0.7,
            top_p=top_p if top_p < 1.0 else 0.95,
            pad_token_id=50256,  # EOS token for DialoGPT
            truncation=True
        )

        # The pipeline echoes the prompt; keep only the new continuation.
        generated_text = response[0]["generated_text"]
        response_text = generated_text[len(full_prompt):].strip()

        # Cut at the first role/EOS marker the model may emit.
        for token in ["<|endoftext|>", "<|user|>", "<|assistant|>", "<|system|>"]:
            response_text = response_text.split(token)[0].strip()

        # Fallback when the model produces nothing usable.
        if not response_text or len(response_text) < 2:
            response_text = "I'm not sure how to respond to that. Could you try again?"

        return history + [(message, response_text)]
    except Exception as e:
        # Surface the failure in-chat rather than crashing the UI.
        error_msg = f"Sorry, I encountered an error: {str(e)}"
        return history + [(message, error_msg)]
def clear_chat():
    """Reset the conversation by handing the Chatbot an empty history."""
    return list()
# Create custom theme for the chatbot.
# Soft base theme tuned to indigo/purple branding; the "*token" values in
# .set() reference other theme CSS variables rather than literal colors.
chatbot_theme = gr.themes.Soft(
    primary_hue="indigo",
    secondary_hue="purple",
    neutral_hue="slate",
    font=gr.themes.GoogleFont("Inter"),
    text_size="lg",
    spacing_size="md",
    radius_size="lg"
).set(
    button_primary_background_fill="*primary_600",
    button_primary_background_fill_hover="*primary_700",
    button_secondary_background_fill="*secondary_300",
    button_secondary_background_fill_hover="*secondary_400",
    block_title_text_weight="600",
    # Darker block background applies only in dark mode ("_dark" suffix).
    block_background_fill_dark="*neutral_800",
)
# Custom CSS for enhanced styling.
# Injected at launch time; the .settings-panel and .chatbot-container classes
# are attached via elem_classes in the Blocks layout below.
custom_css = """
.gradio-container {
max-width: 1200px !important;
}
.chatbot-container {
background: linear-gradient(135deg, #f5f7fa 0%, #e4e8ec 100%);
border-radius: 16px;
padding: 20px;
}
.chat-header {
background: linear-gradient(90deg, #667eea 0%, #764ba2 100%);
-webkit-background-clip: text;
-webkit-text-fill-color: transparent;
background-clip: text;
}
.user-message {
background: linear-gradient(135deg, #667eea 0%, #764ba2 100%);
color: white;
border-radius: 18px 18px 4px 18px;
padding: 12px 16px;
margin: 8px 0;
}
.bot-message {
background: linear-gradient(135deg, #f093fb 0%, #f5576c 100%);
color: white;
border-radius: 18px 18px 18px 4px;
padding: 12px 16px;
margin: 8px 0;
}
.settings-panel {
background: rgba(255, 255, 255, 0.9);
border-radius: 12px;
padding: 16px;
box-shadow: 0 4px 6px rgba(0, 0, 0, 0.1);
}
.loading-indicator {
animation: pulse 1.5s infinite;
}
@keyframes pulse {
0%, 100% { opacity: 1; }
50% { opacity: 0.5; }
}
"""
# Build the Gradio application.
# NOTE(review): indentation was reconstructed from the context-manager
# structure — confirm the nesting (esp. clear_btn/tips inside the Group)
# matches the intended layout. Several kwargs here (`api_visibility`,
# `submit_btn=True`, `bubble_full_width`) are version-specific — verify
# against the installed Gradio release.
with gr.Blocks(
    title="AI Chatbot - Powered by HuggingFace",
    fill_height=True,
    fill_width=True
) as demo:
    # Header with branding
    with gr.Row():
        gr.HTML("""
<div style="text-align: center; padding: 20px 0;">
<h1 style="font-size: 2.5em; margin-bottom: 10px; background: linear-gradient(90deg, #667eea 0%, #764ba2 100%); -webkit-background-clip: text; -webkit-text-fill-color: transparent; background-clip: text;">
πŸ€– AI Chatbot
</h1>
<p style="color: #666; font-size: 1.1em;">
Powered by Microsoft DialoGPT β€’ Built with <a href="https://huggingface.co/spaces/akhaliq/anycoder" target="_blank" style="color: #667eea;">anycoder</a>
</p>
</div>
""")
    # Main content area
    with gr.Row(equal_height=True):
        # Settings sidebar
        with gr.Column(scale=1, min_width=280):
            with gr.Group(elem_classes=["settings-panel"]):
                gr.Markdown("### βš™οΈ Chat Settings")
                # Free-text system prompt fed to chatbot_fn on every turn.
                system_prompt = gr.Textbox(
                    label="System Prompt",
                    placeholder="You are a helpful and friendly assistant...",
                    value="You are a helpful, friendly AI assistant. You provide clear, concise, and accurate responses.",
                    lines=3,
                    max_lines=5
                )
                # Generation knobs passed straight through to the pipeline.
                with gr.Accordion("Advanced Settings", open=False):
                    max_length = gr.Slider(
                        label="Max Response Length",
                        minimum=50,
                        maximum=300,
                        value=150,
                        step=10
                    )
                    temperature = gr.Slider(
                        label="Temperature (Creativity)",
                        minimum=0.0,
                        maximum=2.0,
                        value=0.7,
                        step=0.1,
                        info="Higher = more creative, Lower = more focused"
                    )
                    top_p = gr.Slider(
                        label="Top-p Sampling",
                        minimum=0.1,
                        maximum=1.0,
                        value=0.95,
                        step=0.05,
                        info="Controls vocabulary diversity"
                    )
                clear_btn = gr.Button(
                    "πŸ—‘οΈ Clear Chat",
                    variant="secondary",
                    size="lg"
                )
                gr.Markdown("""
### πŸ’‘ Tips
- Be specific in your questions
- Try different prompts
- Adjust temperature for creativity
- Clear chat to start fresh
""")
        # Chat interface
        with gr.Column(scale=3):
            with gr.Group(elem_classes=["chatbot-container"]):
                # History widget; chatbot_fn both reads and writes it.
                chatbot = gr.Chatbot(
                    label="πŸ’¬ Conversation",
                    placeholder="Start chatting with the AI!",
                    height=450,
                    bubble_full_width=False,
                    avatar_images=("πŸ‘€", "πŸ€–"),
                    latex_delimiters=[],
                    show_copy_button=True,
                    render_markdown=True
                )
            # Input area
            with gr.Row():
                message_input = gr.Textbox(
                    placeholder="Type your message here...",
                    label=None,
                    show_label=False,
                    scale=5,
                    lines=2,
                    max_lines=4,
                    submit_btn=True
                )
                submit_btn = gr.Button(
                    "Send",
                    variant="primary",
                    scale=1,
                    size="lg"
                )
            # Example prompts
            with gr.Row():
                gr.Markdown("### ✨ Try these prompts:")
            with gr.Row():
                # Clicking an example fills message_input; it does not submit.
                examples = gr.Examples(
                    examples=[
                        ["Tell me about artificial intelligence"],
                        ["What are some tips for productivity?"],
                        ["Explain quantum computing simply"],
                        ["Write a short poem about nature"]
                    ],
                    inputs=message_input,
                    label=None
                )
    # Event handlers.
    # NOTE(review): neither handler clears message_input after sending —
    # confirm this is intentional.
    submit_btn.click(
        fn=chatbot_fn,
        inputs=[
            message_input,
            chatbot,
            system_prompt,
            max_length,
            temperature,
            top_p
        ],
        outputs=chatbot,
        api_visibility="public",
        show_progress="minimal"
    )
    # Enter-to-send mirrors the Send button exactly.
    message_input.submit(
        fn=chatbot_fn,
        inputs=[
            message_input,
            chatbot,
            system_prompt,
            max_length,
            temperature,
            top_p
        ],
        outputs=chatbot,
        api_visibility="public",
        show_progress="minimal"
    )
    clear_btn.click(
        fn=clear_chat,
        inputs=None,
        outputs=chatbot,
        api_visibility="public"
    )
# Launch the application with Gradio 6 parameters.
# NOTE(review): in Gradio <=5, `theme` and `css` are gr.Blocks() constructor
# arguments (not launch() arguments) and launch() has no `footer_links`
# parameter — confirm the installed Gradio version actually supports this
# call signature, otherwise move theme/css into gr.Blocks(...) above.
demo.launch(
    theme=chatbot_theme,
    css=custom_css,
    # Bind to all interfaces on the conventional HF Spaces port.
    server_name="0.0.0.0",
    server_port=7860,
    show_error=True,
    quiet=False,
    footer_links=[
        {"label": "Built with anycoder", "url": "https://huggingface.co/spaces/akhaliq/anycoder"},
        {"label": "Model: DialoGPT-medium", "url": "https://huggingface.co/microsoft/DialoGPT-medium"},
        {"label": "Gradio", "url": "https://gradio.app"},
        {"label": "HuggingFace", "url": "https://huggingface.co"}
    ]
)