# Source: chatbot/simple_chatbot.py (Hugging Face Space by Deva1211, commit 01d262c)
# 🎯 COMPLETE TRANSFORMATION: Simple Emotion-Aware AI Assistant
#!/usr/bin/env python3
"""
Simple Emotion-Aware Chatbot
This chatbot:
1. Gives direct, helpful answers to questions
2. Detects user emotions using sentiment analysis
3. Responds with appropriate tone and emojis
4. Uses Mistral-7B-AWQ for high-quality responses
5. No therapy-style conversations - just helpful assistance
"""
import gradio as gr
import torch
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
import re
import random
print("πŸ€– Loading Simple Emotion-Aware Chatbot...")
# === MODEL CONFIGURATION ===
MODEL_ID = "TheBloke/Mistral-7B-Instruct-v0.2-AWQ"
# Load main chat model
try:
print("πŸ”„ Loading Mistral-7B-AWQ model...")
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
MODEL_ID,
device_map="auto",
torch_dtype=torch.float16,
low_cpu_mem_usage=True,
trust_remote_code=True
)
print("βœ… Mistral model loaded successfully!")
except Exception as e:
print(f"⚠️ Mistral AWQ failed: {e}")
print("πŸ“¦ Falling back to DialoGPT...")
MODEL_ID = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(MODEL_ID)
model = AutoModelForCausalLM.from_pretrained(
MODEL_ID,
torch_dtype=torch.float16 if torch.cuda.is_available() else torch.float32,
low_cpu_mem_usage=True
)
print("βœ… DialoGPT fallback loaded!")
# Add pad token if needed
if tokenizer.pad_token is None:
tokenizer.pad_token = tokenizer.eos_token
# Load sentiment analysis model
try:
print("πŸ”„ Loading sentiment analysis model...")
sentiment_analyzer = pipeline(
"sentiment-analysis",
model="distilbert-base-uncased-finetuned-sst-2-english",
return_all_scores=True
)
print("βœ… Sentiment analyzer loaded!")
except Exception as e:
print(f"⚠️ Sentiment analyzer failed: {e}")
sentiment_analyzer = None
# === SIMPLE SYSTEM PROMPT ===
SIMPLE_SYSTEM_PROMPT = """You are a helpful AI assistant. Answer questions directly and clearly. Be friendly and concise. If someone seems upset, be understanding. If they seem happy, match their energy. Keep responses to 1-2 sentences unless more detail is needed."""
# === EMOTION DETECTION ===
def detect_emotion(text):
    """Classify the sentiment of *text* as positive/negative/neutral.

    Args:
        text: The raw user message.

    Returns:
        Tuple ``(emotion, confidence)`` where ``emotion`` is one of
        ``"positive"``, ``"negative"`` or ``"neutral"`` and ``confidence``
        is a float in [0, 1]. Falls back to ``("neutral", 0.5)`` when the
        sentiment model is unavailable or inference fails.
    """
    if not sentiment_analyzer:
        return "neutral", 0.5
    try:
        # Pipeline (return_all_scores=True) yields a list of
        # {label, score} dicts for the first (only) input.
        results = sentiment_analyzer(text)[0]
        # Map the SST-2 POSITIVE/NEGATIVE labels onto our emotion scheme.
        for result in results:
            if result['label'] == 'POSITIVE':
                return "positive", result['score']
            elif result['label'] == 'NEGATIVE':
                return "negative", result['score']
        return "neutral", 0.5
    except Exception as e:
        # Was a bare `except:` — narrowed so Ctrl-C/SystemExit still propagate,
        # and the failure is logged instead of silently swallowed.
        print(f"⚠️ Emotion detection failed: {e}")
        return "neutral", 0.5
# === EMOJI SELECTION ===
def get_appropriate_emoji(emotion, confidence):
    """Pick an emoji that matches the detected emotion.

    Low-confidence detections (< 0.6) always get the default friendly face,
    so we never over-commit to an uncertain emotional read.
    """
    if confidence < 0.6:
        return "😊"  # Default friendly
    # Emotion-keyed emoji pools; anything unrecognized falls back to neutral.
    pools = {
        "positive": ["😊", "😄", "🎉", "👍", "✨"],
        "negative": ["😔", "💙", "🫂", "😞", "💗"],
    }
    neutral_pool = ["😊", "👋", "🤔", "💭"]
    return random.choice(pools.get(emotion, neutral_pool))
# === RESPONSE TONE ADJUSTMENT ===
def adjust_response_tone(response, emotion, confidence):
    """Prepend an emotion-matched opener to *response* when appropriate.

    Uncertain detections (confidence < 0.6) leave the text untouched.
    Negative emotion gets a supportive opener unless one already appears
    near the start; positive emotion gets an enthusiastic opener unless
    the reply already sounds upbeat.
    """
    if confidence < 0.6:
        # Unclear signal: don't risk a mismatched tone.
        return response

    if emotion == "negative":
        openers = [
            "I understand that's tough. ",
            "That sounds challenging. ",
            "I hear you. ",
            "I can see why that would be difficult. ",
        ]
        # Only check the start of the reply for an existing supportive opener.
        head = response.lower()[:20]
        already_supportive = any(opener.lower() in head for opener in openers)
        if not already_supportive:
            return random.choice(openers) + response
    elif emotion == "positive":
        openers = [
            "That's great! ",
            "Wonderful! ",
            "That sounds amazing! ",
            "How exciting! ",
        ]
        lowered = response.lower()
        already_upbeat = "great" in lowered or "wonderful" in lowered
        if not already_upbeat:
            return random.choice(openers) + response

    return response
# === MAIN RESPONSE FUNCTION ===
def generate_response(message, history):
    """Generate a simple, emotion-aware response.

    Args:
        message: The latest user message.
        history: List of [user_msg, bot_msg] pairs from earlier turns
            (may be empty). Only the last two exchanges are replayed to
            the model to keep the prompt short.

    Returns:
        A reply string ending with an emotion-matched emoji. On any
        failure, a canned fallback keyed to the detected emotion is
        returned instead of raising.
    """
    try:
        # 1. Detect user emotion
        emotion, confidence = detect_emotion(message)
        print(f"Detected emotion: {emotion} (confidence: {confidence:.2f})")
        # 2. Prepare conversation for model (system prompt + current message).
        messages = [
            {"role": "system", "content": SIMPLE_SYSTEM_PROMPT},
            {"role": "user", "content": message}
        ]
        # Add recent history (last 2 exchanges only) — rebuilt from scratch
        # so the system prompt stays first and the new message stays last.
        if history:
            recent_history = history[-2:]
            full_messages = [{"role": "system", "content": SIMPLE_SYSTEM_PROMPT}]
            for user_msg, bot_msg in recent_history:
                full_messages.append({"role": "user", "content": user_msg})
                if bot_msg:
                    full_messages.append({"role": "assistant", "content": bot_msg})
            full_messages.append({"role": "user", "content": message})
            messages = full_messages
        # 3. Generate response using model. MODEL_ID tells us which model
        # actually loaded (Mistral, or the DialoGPT fallback).
        if "mistral" in MODEL_ID.lower():
            # Use Mistral chat template
            conversation = tokenizer.apply_chat_template(
                messages,
                tokenize=False,
                add_generation_prompt=True
            )
        else:
            # Simple format for DialoGPT — note history/system prompt are
            # dropped in this path; only the current message is sent.
            conversation = f"{message}{tokenizer.eos_token}"
        # Tokenize
        inputs = tokenizer(
            conversation,
            return_tensors="pt",
            truncation=True,
            max_length=1024,
            padding=True
        )
        # Generate with optimized parameters
        # NOTE(review): input_ids is moved to model.device but attention_mask
        # is not — confirm this is intended on GPU setups.
        with torch.no_grad():
            outputs = model.generate(
                inputs['input_ids'].to(model.device),
                attention_mask=inputs.get('attention_mask', None),
                max_new_tokens=80, # Short, concise responses
                temperature=0.7, # Balanced creativity
                top_p=0.9, # Focused responses
                top_k=50, # Good variety
                repetition_penalty=1.1,
                do_sample=True,
                pad_token_id=tokenizer.pad_token_id or tokenizer.eos_token_id,
                eos_token_id=tokenizer.eos_token_id
            )
        # Decode only the newly generated tokens (everything past the prompt).
        raw_response = tokenizer.decode(
            outputs[:, inputs['input_ids'].shape[-1]:][0],
            skip_special_tokens=True
        ).strip()
        # 4. Clean up response
        response = raw_response.replace("Human:", "").replace("Assistant:", "").strip()
        # Remove any leftover conversation markers at the start of the reply.
        response = re.sub(r'^(User|Bot|AI|Assistant):\s*', '', response)
        # 5. Adjust tone based on emotion
        response = adjust_response_tone(response, emotion, confidence)
        # 6. Add appropriate emoji
        emoji = get_appropriate_emoji(emotion, confidence)
        # 7. Ensure response isn't too long or repetitive — keep at most the
        # first two sentences of an over-long reply.
        if len(response.split()) > 50:
            sentences = response.split('.')
            response = '. '.join(sentences[:2]) + '.'
        # 8. Final formatting: guarantee terminal punctuation, then append emoji.
        if response and not response.endswith(('!', '?', '.')):
            response += '.'
        final_response = f"{response} {emoji}"
        return final_response
    except Exception as e:
        print(f"Error generating response: {e}")
        # Simple fallback based on emotion — never propagate errors to the UI.
        emotion, confidence = detect_emotion(message)
        emoji = get_appropriate_emoji(emotion, confidence)
        if emotion == "negative":
            return f"I understand you're going through something difficult. How can I help? {emoji}"
        elif emotion == "positive":
            return f"That's wonderful to hear! What would you like to know? {emoji}"
        else:
            return f"I'm here to help! What can I assist you with? {emoji}"
# === GRADIO INTERFACE ===
def chatbot_response(message, history):
    """Gradio callback: append a [message, reply] pair to chat history."""
    # Blank or whitespace-only submissions are ignored; history is unchanged.
    if not message.strip():
        return history
    reply = generate_response(message, history)
    history.append([message, reply])
    return history
# Create Gradio interface
with gr.Blocks(title="Simple AI Assistant") as demo:
    gr.Markdown("# 🤖 Simple AI Assistant")
    gr.Markdown("""
    **A helpful AI assistant that:**
    - Answers your questions directly and clearly
    - Detects your emotions and responds appropriately
    - Uses emojis to match the conversation tone
    - Keeps responses concise and useful
    """)
    # BUGFIX: history is stored as [user, bot] pairs everywhere in this file
    # (chatbot_response appends pairs; generate_response unpacks pairs), so the
    # Chatbot must use the "tuples" format. The previous type="messages"
    # expects {"role": ..., "content": ...} dicts and breaks rendering.
    chatbot = gr.Chatbot(
        value=[],
        height=500,
        type="tuples"
    )
    msg = gr.Textbox(
        placeholder="Ask me anything! I'll help you out 😊",
        container=False,
        scale=7
    )
    with gr.Row():
        clear = gr.Button("Clear Chat", variant="secondary")
    # Settings (simplified)
    with gr.Accordion("⚙️ Settings", open=False):
        gr.Markdown("*The chatbot is optimized for speed and quality by default.*")
    # Event handlers: submit clears the textbox and appends the new exchange.
    msg.submit(
        lambda m, h: ("", chatbot_response(m, h)),
        [msg, chatbot],
        [msg, chatbot]
    )
    clear.click(lambda: [], None, chatbot)
    # Example conversations
    gr.Examples(
        examples=[
            "What's the weather like today?",
            "I'm feeling stressed about work",
            "Can you help me with Python code?",
            "I just got a promotion!",
            "How do I make pasta?"
        ],
        inputs=msg,
        label="Try these examples:"
    )
# Entry point: only launch the UI when run as a script, not on import.
if __name__ == "__main__":
    print("🚀 Starting Simple AI Assistant...")
    # share=True requests a public share link in addition to the local server.
    demo.launch(share=True)
    print("🌐 Chatbot is running!")