import gradio as gr
from transformers import pipeline
import re
from typing import Dict, List, Tuple
# ========== STEP 1: INITIALIZE OPTIMIZED MODELS ==========
# Using smaller, faster models
# Text-generation model backing the slow fallback path (generate_response).
chatbot = pipeline(
    "text-generation",
    model="distilgpt2",  # 6-layer distilled version of GPT-2 (faster)
    device="cpu"  # Remove if you have GPU
)
# Lightweight translation model for Bangla
# Bangla -> English translator used by fast_translate before keyword matching.
# NOTE(review): both pipelines download weights on first run — confirm the
# deployment environment allows network access at startup.
translator = pipeline(
    "translation",
    model="Helsinki-NLP/opus-mt-bn-en",  # Faster than nllb
    device="cpu"
)
# ========== STEP 2: OPTIMIZED DATA STRUCTURES ==========
# Flat string fields (rather than nested dicts) keep lookups cheap and leave
# each answer ready to display with no extra formatting work.
resume_data = {
    "name": "MD. Abdur Rahim (Ratul)",
    "profession": "Programmer",
    "education": (
        "SSC: Independent Model School, Bogra (4.29)\n"
        "HSC: Shahzadpur Govt. College (4.92)\n"
        "BSc CSE: Khwaja Yunus Ali University (Current, 3.75)"
    ),
    "skills": (
        "Programming: C++, C\n"
        "Core: Algorithms, Problem Solving\n"
        "Other: SEO, Communication"
    ),
    "coding": (
        "Codeforces: 831 rating (RATUL_CSE18)\n"
        "Beecrowd: 448.58 points\n"
        "Profile: codeforces.com/profile/ratul_cse18"
    ),
    "contact": (
        "Phone: +8801786500883\n"
        "Email: ratul1.cse18kyau@gmail.com\n"
        "Location: Shahzadpur, Sirajganj"
    ),
}
# ========== STEP 3: PRE-COMPUTED ANSWERS ==========
# Canned replies resolved once at import time so the chat loop can answer the
# common questions without touching a model.
QA = dict(
    # Direct matches (fastest)
    who=f"I'm {resume_data['name']}, a {resume_data['profession']}.",
    education=resume_data["education"],
    skills=resume_data["skills"],
    code=resume_data["coding"],
    contact=resume_data["contact"],
    # Pattern-based
    single="I don't share relationship status.",
    age="That's private. Ask about my coding skills!",
    project="Ratul offers affordable projects. Interested?",
    # Default
    fallback="Ask about my education, skills, or coding profiles!",
)
# ========== STEP 4: OPTIMIZED PROCESSING ==========
ABUSIVE_WORDS = {"fuck", "ass", "bitch", "shit", "rape"}  # Set for O(1) lookups
# Compiled once and hoisted; extracts lowercase word tokens for screening.
_WORD_RE = re.compile(r"[a-z']+")

def is_abusive(text: str) -> bool:
    """Return True if *text* contains any blocklisted word as a whole word.

    Bug fix: the previous substring check flagged innocent words — "class",
    "passion" and "assistant" all contain "ass". Matching on whole word
    tokens removes those false positives while still catching the listed
    profanities in any casing.
    """
    return not ABUSIVE_WORDS.isdisjoint(_WORD_RE.findall(text.lower()))
def detect_bangla(text: str) -> bool:
    """Return True if *text* contains at least one Bengali-block character.

    Checks the Unicode Bengali range U+0980..U+09FF, character by character.
    """
    for ch in text:
        if "\u0980" <= ch <= "\u09FF":
            return True
    return False
def fast_translate(text: str) -> str:
    """Return *text* translated Bangla -> English, or unchanged if not Bangla.

    Guard clause skips the translation model entirely for English input,
    which is the fast path for most questions.
    """
    if not detect_bangla(text):
        return text
    result = translator(text, max_length=60)
    return result[0]["translation_text"]
def generate_response(prompt: str) -> str:
    """Run the language model on *prompt* and return the text after 'Trisha:'.

    Greedy single-beam decoding keeps generation as fast as possible.
    """
    outputs = chatbot(
        prompt,
        max_new_tokens=100,
        do_sample=False,  # Greedy decoding: faster than sampling
        num_beams=1,      # No beam search: faster
    )
    full_text = outputs[0]["generated_text"]
    # Keep only what follows the final "Trisha:" marker (the assistant's turn);
    # if the marker is absent, the whole generation is returned unchanged.
    return full_text.rpartition("Trisha:")[2].strip()
# ========== STEP 5: OPTIMIZED CHAT LOGIC ==========
# ========== STEP 5: OPTIMIZED CHAT LOGIC ==========
def answer_question(question: str) -> str:
    """Answer *question* (English or Bangla) about Ratul's resume.

    Fast path: translate Bangla to English, screen profanity, then match
    keywords against the pre-computed QA table. Falls back to the slow
    text-generation model only when no keyword matches.
    """
    question = question.strip().lower()
    question_en = fast_translate(question)
    # 1. Safety check
    if is_abusive(question_en):
        return "⚠️ Keep it professional or I'll disconnect!"
    # 2. Check pre-computed answers (fast path).
    # Bug fix: short keywords now match on word boundaries so that e.g.
    # "language" no longer triggers the "age" reply, "whose" no longer
    # triggers "who", and "mortgage"/"page" stay on the fallback path.
    if re.search(r"\bwho\b", question_en):
        return QA["who"]
    if "educat" in question_en:  # prefix covers education/educated/educational
        return QA["education"]
    if "skill" in question_en:
        return QA["skills"]
    if "code" in question_en:  # also matches "codeforces"
        return QA["code"]
    if "contact" in question_en:
        return QA["contact"]
    if re.search(r"\bsingle\b", question_en):
        return QA["single"]
    if re.search(r"\bage\b", question_en) or "birth" in question_en:
        return QA["age"]
    if "project" in question_en:
        return QA["project"]
    # 3. Generate only when necessary (slower path)
    prompt = f"Trisha (Ratul's Assistant):\nQ: {question_en}\nA:"
    return generate_response(prompt)
# ========== STEP 6: LIGHTNING-FAST GRADIO UI ==========
# Minimal Blocks layout: one input textbox, one output textbox, wired so that
# pressing Enter runs answer_question.
with gr.Blocks(title="⚡ Trisha Chatbot") as demo:
    gr.Markdown(f"## 🤖 Hi! I'm Trisha, {resume_data['name']}'s assistant")
    with gr.Row():
        user_input = gr.Textbox(label="Ask anything...", placeholder="Type in English/Bangla")
        output = gr.Textbox(label="Response", lines=5)
    # Enter in the textbox submits the question; also exposed on the Gradio
    # HTTP API under the name "chat".
    user_input.submit(
        fn=answer_question,
        inputs=user_input,
        outputs=output,
        api_name="chat"
    )
# Launch the Gradio server only when run as a script (not on import).
if __name__ == "__main__":
    demo.launch(server_port=7860)