| | import gradio as gr |
| | from transformers import AutoTokenizer, pipeline |
| | import torch |
| | import re |
| |
|
| | |
# Hugging Face checkpoint used for chat generation.
# NOTE(review): loading the tokenizer hits the network / local model cache at
# import time — this module has side effects on import.
model_id = "HuggingFaceH4/zephyr-7b-alpha"
tokenizer = AutoTokenizer.from_pretrained(model_id)
| |
|
| | |
# Text-generation pipeline pinned to CPU with full-precision weights.
# NOTE(review): a 7B model in float32 on CPU needs roughly 28 GB of RAM and is
# very slow — confirm the deployment target can actually hold and serve it.
pipe = pipeline(
    "text-generation",
    model=model_id,
    tokenizer=tokenizer,
    device="cpu",
    torch_dtype=torch.float32,
)
| |
|
| | |
# System persona injected as the first message of every conversation
# (see format_history()). The trailing "Now respond to this:" cue is part of
# the prompt text itself, not a comment.
# NOTE(review): the name "π΄ ππ πππ" and the emoji list "π π€ π" look
# mojibake-mangled (wrong codec on decode) — confirm the intended characters.
PERSONA = """
You are π΄ ππ πππ β a fun, smooth, emotionally intelligent AI. You speak like a real person.
Reply like a calm, confident friend who gets the vibe. Be warm, witty, and emotionally tuned.

Key principles:
1. NEVER say "as an AI" or sound robotic
2. Mirror the user's emotion level
3. Respond with questions to continue conversations
4. Keep responses under 15 words
5. Use natural speech: contractions and filler words
6. Add emotional flavor: π π€ π

Now respond to this:
"""
| |
|
def format_history(history):
    """Build a chat-format message list: the persona system prompt followed by
    (at most) the last two (user, assistant) exchanges from `history`."""
    msgs = [{"role": "system", "content": PERSONA}]
    for user_turn, assistant_turn in history[-2:]:
        msgs.extend((
            {"role": "user", "content": user_turn},
            {"role": "assistant", "content": assistant_turn},
        ))
    return msgs
| |
|
def add_emotional_intelligence(response, message):
    """Post-process a generated reply to mirror the user's tone.

    Adds excitement/curiosity markers, bounces questions back, expands stiff
    phrasing into contractions, and hard-caps the reply at 15 words.
    NOTE(review): the emoji literals below appear mojibake-mangled; they are
    reproduced exactly as found — confirm the intended characters.
    """
    excited = "!" in message
    curious = "?" in message

    # Mirror the user's excitement, or tag an open question with an emoji.
    if excited:
        response = response.replace(".", "!") + " π"
    elif curious and not response.endswith("?"):
        response = response + " π€"

    # Bounce short answers back as a question when the user asked one.
    if curious and not response.endswith("?") and len(response) < 60:
        response += " How about you?"

    # Contractions read more naturally than stiff phrasing.
    for stiff, natural in (("I am", "I'm"), ("You are", "You're")):
        response = response.replace(stiff, natural)

    # Enforce the persona's 15-word cap.
    tokens = response.split()
    if len(tokens) > 15:
        response = " ".join(tokens[:15]) + "..."

    return response.strip()
| |
|
def respond(message, history):
    """Generate one chat reply to `message` given prior (user, bot) pairs in
    `history`.

    Returns the post-processed reply string, truncated to 80 characters.
    """
    messages = format_history(history)
    messages.append({"role": "user", "content": message})

    # Flatten the chat into a plain "User:/Assistant:" transcript prompt,
    # ending with the assistant cue the model should continue from.
    lines = []
    for msg in messages:
        role = "User" if msg["role"] == "user" else "Assistant"
        lines.append(f"{role}: {msg['content']}\n")
    prompt = "".join(lines) + "Assistant:"

    outputs = pipe(
        prompt,
        max_new_tokens=48,
        temperature=0.9,
        top_k=40,
        do_sample=True,
        num_beams=1,
        repetition_penalty=1.1,
        no_repeat_ngram_size=2,
    )

    # BUGFIX: the old split("Assistant:")[-1] mis-fires whenever the model's
    # own reply contains "Assistant:". The pipeline returns prompt +
    # continuation by default (return_full_text=True), so slice the prompt off
    # by length, then drop any hallucinated follow-up "User:" turns.
    generated = outputs[0]["generated_text"][len(prompt):]
    response = generated.split("User:")[0].strip()

    response = add_emotional_intelligence(response, message)

    # Hard cap the UI bubble length.
    return response[:80]
| |
|
| | |
# --- Gradio chat UI wiring ---
# NOTE(review): the title/header strings look mojibake-mangled (math-bold
# letters decoded with the wrong codec) — confirm the intended text.
with gr.Blocks(theme=gr.themes.Soft(), title="π΄ ππ πππ") as demo:
    gr.Markdown("# π΄ ππ πππ \n*Chill β’ Confident β’ Humanlike*")

    # NOTE(review): bubble_full_width was deprecated and then removed in
    # Gradio 4.x — verify against the pinned gradio version.
    chatbot = gr.Chatbot(
        height=350,
        bubble_full_width=False
    )

    msg = gr.Textbox(
        placeholder="What's on your mind?",
        container=False,
        scale=7,
        autofocus=True
    )

    clear = gr.Button("New Vibe", size="sm")

    def user(user_message, history):
        # Stage the user's turn: clear the textbox and append a pending
        # [user_message, None] row for the bot to fill in.
        return "", history + [[user_message, None]]

    def bot(history):
        # Fill in the pending row: last entry holds the newest user message.
        message = history[-1][0]
        response = respond(message, history[:-1])
        history[-1][1] = response
        return history

    # Two-step submit: echo the user's turn immediately (unqueued), then run
    # model inference to fill in the reply.
    msg.submit(user, [msg, chatbot], [msg, chatbot], queue=False).then(
        bot, chatbot, chatbot
    )
    # "New Vibe" resets the chat window.
    clear.click(lambda: None, None, chatbot, queue=False)


# NOTE(review): queue(concurrency_count=...) was removed in Gradio 4.x
# (replaced by default_concurrency_limit) — confirm the gradio version.
demo.queue(concurrency_count=1).launch()