|
|
import os |
|
|
import torch |
|
|
import gradio as gr |
|
|
from fastapi import FastAPI, Request, Form |
|
|
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline |
|
|
import re |
|
|
import time |
|
|
import uuid |
|
|
|
|
|
|
|
|
# Send Hugging Face downloads/cache to a guaranteed-writable location
# (useful on hosts such as HF Spaces where $HOME is read-only).
os.makedirs("/tmp/cache", exist_ok=True)
# NOTE(review): transformers is imported above this point, and some versions
# resolve TRANSFORMERS_CACHE / HF_HOME at import time — setting them here may
# be too late to take effect; confirm against the installed version.
os.environ["TRANSFORMERS_CACHE"] = "/tmp/cache"
os.environ["HF_HOME"] = "/tmp/cache"

# FastAPI application: the JSON endpoints (/chat, /new_session) are registered
# below, and the Gradio UI is mounted onto it at the bottom of the file.
app = FastAPI(title="π΄ ππ πππ Chatbot API")
|
|
|
|
|
|
|
|
# DialoGPT-small: a lightweight conversational GPT-2 variant, loaded once at
# startup and shared by all requests.
model_name = "microsoft/DialoGPT-small"

# Pass cache_dir explicitly: the TRANSFORMERS_CACHE/HF_HOME env vars above are
# set *after* `transformers` was imported, and some versions resolve the cache
# location at import time — the env vars alone may not redirect the download.
tokenizer = AutoTokenizer.from_pretrained(model_name, cache_dir="/tmp/cache")
model = AutoModelForCausalLM.from_pretrained(model_name, cache_dir="/tmp/cache")
|
|
|
|
|
|
|
|
PERSONA = """
[System: You are π΄ ππ πππ - a fun, smooth, emotionally intelligent AI.
You speak like a real person. Reply like a calm, confident friend who gets the vibe.
Keep responses under 15 words. Be human-like. Add emotional flavor: π π€ π]
"""

# Per-session histories: session_id -> list of (user_text, bot_text) tuples.
# NOTE(review): entries are never evicted, so memory grows with session count.
chat_memories = {}


def format_context(history):
    """Render the prompt prefix: the persona plus at most the last 3 exchanges."""
    turns = [
        f"You: {user_text}\nπ΄ ππ πππ: {bot_text}\n"
        for user_text, bot_text in history[-3:]
    ]
    return PERSONA + "\n" + "".join(turns)
|
|
|
|
|
def add_emotional_intelligence(response, message):
    """Post-process a raw model reply: add emoji flavor, bounce questions back,
    use casual contractions, and enforce the persona's 15-word cap."""
    lowered = response.lower()

    # At most one emoji; excitement takes priority over thoughtfulness.
    if "!" in message or any(w in lowered for w in ("cool", "great", "love", "awesome")):
        response += " π"
    elif "?" in message or any(w in lowered for w in ("think", "why", "how", "consider")):
        response += " π€"

    # Short answer to a question -> invite the user to keep talking.
    if "?" in message and not response.endswith("?") and len(response.split()) < 10:
        response += " What do you think?"

    # Sound casual.
    response = response.replace("I am", "I'm").replace("You are", "You're")

    # Hard cap at 15 words, with an ellipsis to mark the cut.
    words = response.split()
    if len(words) > 15:
        response = " ".join(words[:15]) + "..."

    return response
|
|
|
|
|
def generate_response(message: str, session_id: str) -> str:
    """Generate a persona-styled reply for *message* in the given session.

    Builds a prompt from the session's recent history, samples a continuation
    from DialoGPT, post-processes it, records the exchange in chat_memories,
    and returns the reply truncated to 100 characters.
    """
    start_time = time.time()
    # Previous (user, bot) exchanges for this session; empty for new sessions.
    history = chat_memories.get(session_id, [])
    # Prompt = persona + last 3 exchanges + the new user turn, ending with the
    # bot's label so the model continues speaking as the bot.
    context = format_context(history) + f"You: {message}\nπ΄ ππ πππ:"

    inputs = tokenizer.encode(context, return_tensors="pt")

    # Sampling settings for short, varied, low-repetition chat replies.
    # pad_token_id is set explicitly because GPT-2-family tokenizers define
    # no pad token; eos is the conventional substitute.
    outputs = model.generate(
        inputs,
        max_new_tokens=50,
        temperature=0.85,
        top_k=40,
        do_sample=True,
        num_beams=1,
        repetition_penalty=1.15,
        pad_token_id=tokenizer.eos_token_id
    )

    # Decode prompt + continuation, then keep only the text after the final
    # bot label, i.e. the newly generated reply.
    full_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
    response = full_text.split("π΄ ππ πππ:")[-1].strip()

    # Trim a hallucinated next user turn and anything past the first line.
    response = response.split("\nYou:")[0].split("\n")[0]

    response = add_emotional_intelligence(response, message)

    # Ensure terminal punctuation. NOTE(review): the "..." entry in this set is
    # dead — response[-1] is a single character — but "." already covers it.
    if response and response[-1] not in {".", "!", "?", "..."}:
        response += "." if len(response) > 20 else "..."

    # Persist the exchange (redundant reassignment is harmless; the same list
    # object is already stored for existing sessions).
    history.append((message, response))
    chat_memories[session_id] = history

    end_time = time.time()
    print(f"Response generated in {end_time-start_time:.2f}s for session {session_id}")

    return response[:100]
|
|
|
|
|
|
|
|
@app.post("/chat")
async def chat_api(
    request: Request,
    query: str = Form(..., description="User's message"),
    session_id: str = Form("default", description="Conversation session ID")
):
    """Chat endpoint: generate and return the AI's reply for *query*."""
    # Keep the try body minimal; any generation failure is reported as a
    # structured error payload rather than a raised HTTP error.
    try:
        reply = generate_response(query, session_id)
    except Exception as e:
        return {
            "status": "error",
            "message": str(e)
        }
    return {
        "status": "success",
        "response": reply,
        "session_id": session_id
    }
|
|
|
|
|
@app.post("/new_session")
async def new_session():
    """Create a new conversation session and return its ID."""
    fresh_id = str(uuid.uuid4())
    # Register the session with an empty history so /chat can use it at once.
    chat_memories[fresh_id] = []
    return {"status": "success", "session_id": fresh_id}
|
|
|
|
|
|
|
|
# ---------------------------------------------------------------------------
# Gradio UI, mounted onto the FastAPI app at "/".
# ---------------------------------------------------------------------------
with gr.Blocks(title="π΄ ππ πππ Chatbot", theme=gr.themes.Soft()) as demo:
    # Active conversation id actually used by the chat callbacks below.
    session_state = gr.State("default")

    with gr.Row():
        gr.Markdown("# π΄ ππ πππ")
        gr.Markdown("Chill β’ Confident β’ Emotionally Intelligent")

    with gr.Row():
        with gr.Column(scale=1):
            session_id = gr.Textbox(label="Session ID", value="default")
            new_session_btn = gr.Button("New Session")
            gr.Markdown("### API Usage")
            gr.Markdown("""
```
POST /chat
- query: Your message
- session_id: Conversation ID

POST /new_session
- Returns new session ID
```
""")

        with gr.Column(scale=3):
            chatbot = gr.Chatbot(height=400)
            msg = gr.Textbox(placeholder="Type your message...", container=False)
            with gr.Row():
                submit_btn = gr.Button("Send")
                clear_btn = gr.Button("Clear Chat")

    def user(user_message, history, session):
        """Append the user's turn (bot slot pending) and clear the input box."""
        return "", history + [[user_message, None]], session

    def bot(history, session):
        """Fill in the bot's reply for the most recent user turn."""
        message = history[-1][0]
        history[-1][1] = generate_response(message, session)
        return history, session

    def new_session_action():
        """Start a fresh session: new id, empty stored history, cleared chat."""
        new_id = str(uuid.uuid4())
        chat_memories[new_id] = []
        # BUG FIX: return the new id twice — once for the display textbox and
        # once for session_state. Previously only the textbox was updated, so
        # the callbacks kept generating under the old session id.
        return new_id, new_id, []

    def clear_chat(session):
        """Wipe the stored history for the current session and clear the window."""
        if session in chat_memories:
            chat_memories[session] = []
        return []

    msg.submit(user, [msg, chatbot, session_state], [msg, chatbot, session_state]).then(
        bot, [chatbot, session_state], [chatbot, session_state]
    )
    submit_btn.click(user, [msg, chatbot, session_state], [msg, chatbot, session_state]).then(
        bot, [chatbot, session_state], [chatbot, session_state]
    )
    # Route the new id into session_state as well (see new_session_action).
    new_session_btn.click(new_session_action, None, [session_id, session_state, chatbot])
    # Keep session_state in sync when the user edits the Session ID textbox,
    # which was previously display-only.
    session_id.change(lambda s: s, session_id, session_state)
    clear_btn.click(clear_chat, session_state, chatbot)

# Serve the Gradio UI at the root path of the FastAPI app.
app = gr.mount_gradio_app(app, demo, path="/")