import gradio as gr
from transformers import AutoTokenizer, AutoModelForCausalLM
import torch

model_id = "microsoft/DialoGPT-medium"
tokenizer = AutoTokenizer.from_pretrained(model_id)
model = AutoModelForCausalLM.from_pretrained(model_id)

BOT_NAME = "𝕴 𝖆𝖒 π–π–Žπ–’"

PERSONA = f"""
[System: You are {BOT_NAME} - a fun, smooth, emotionally intelligent AI. You speak like a real person, not a robot. Keep it under 15 words. 😊😏]
"""


def format_context(history):
    # Build the prompt from the persona plus the last three (user, bot) turns.
    context = PERSONA + "\n"
    for user, bot in history[-3:]:
        context += f"You: {user}\n{BOT_NAME}: {bot}\n"
    return context


def enhance_response(resp, message):
    # Add a light emoji flourish, then cap the reply at 15 words.
    if any(x in message for x in ["?", "think", "why"]):
        resp += " πŸ€”"
    elif any(x in resp.lower() for x in ["cool", "great", "love", "fun"]):
        resp += " 😏"
    return " ".join(resp.split()[:15])


def chat(user_input, history):
    context = format_context(history) + f"You: {user_input}\n{BOT_NAME}:"
    inputs = tokenizer.encode(context, return_tensors="pt", truncation=True, max_length=1024)
    with torch.no_grad():
        outputs = model.generate(
            inputs,
            max_new_tokens=50,
            temperature=0.9,
            top_k=40,
            do_sample=True,
            pad_token_id=tokenizer.eos_token_id,
        )
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    # Keep only the text after the final bot prefix, and stop before any
    # hallucinated next user turn.
    response = full_text.split(f"{BOT_NAME}:")[-1].split("\nYou:")[0].strip()
    response = enhance_response(response, user_input)
    history.append((user_input, response))
    # The Chatbot expects the "messages" format, while the session state keeps
    # (user, bot) tuples for prompt building; return one value per output.
    messages = []
    for user, bot in history:
        messages.append({"role": "user", "content": user})
        messages.append({"role": "assistant", "content": bot})
    return messages, history


with gr.Blocks() as demo:
    gr.Markdown(f"# {BOT_NAME}\n*Smooth β€’ Chill β€’ Emotional*")
    chatbot = gr.Chatbot(height=400, type="messages", label="Chat")
    msg = gr.Textbox(placeholder="Type something…", show_label=False)
    state = gr.State([])
    msg.submit(chat, [msg, state], [chatbot, state])
    # Reset must clear both the visible chat and the stored history.
    gr.Button("Reset").click(lambda: ([], []), None, [chatbot, state])

demo.launch(server_name="0.0.0.0", server_port=7860, show_api=True)
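
# A minimal sketch of querying the running app from a separate process with
# gradio_client. It assumes the default endpoint name "/chat" (derived from the
# handler's function name) and the local URL/port used in launch(); adjust both
# if you change them. Left commented out because demo.launch() blocks this
# script's main thread.
#
#   from gradio_client import Client
#
#   client = Client("http://127.0.0.1:7860/")
#   result = client.predict("hey, how are you?", api_name="/chat")
#   print(result)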