# Hugging Face Space: "Trisha" resume chatbot (page capture showed the Space status as Sleeping)
| import gradio as gr | |
| from transformers import pipeline | |
| import re | |
| from typing import Dict, List, Tuple | |
# ========== STEP 1: INITIALIZE OPTIMIZED MODELS ==========
# Small distilled models keep cold-start and per-request latency low.

# General-purpose text generator, used only for questions that have no
# pre-computed answer (the slow fallback path in answer_question).
chatbot = pipeline(
    "text-generation",
    model="distilgpt2",  # 6-layer distilled GPT-2 — much faster than full GPT-2
    device="cpu"  # remove (or set a CUDA device) if you have a GPU
)

# Bangla -> English translation; its output feeds the English keyword
# matching in answer_question. NOTE(review): opus-mt-bn-en is one-way
# (bn->en only) — confirm no en->bn replies are ever needed.
translator = pipeline(
    "translation",
    model="Helsinki-NLP/opus-mt-bn-en",  # lighter/faster than NLLB
    device="cpu"
)
# ========== STEP 2: OPTIMIZED DATA STRUCTURES ==========
# Flat string fields (rather than nested dicts) so building an answer is a
# single dictionary access.
resume_data = {
    "name": "MD. Abdur Rahim (Ratul)",
    "profession": "Programmer",
    "education": (
        "SSC: Independent Model School, Bogra (4.29)\n"
        "HSC: Shahzadpur Govt. College (4.92)\n"
        "BSc CSE: Khwaja Yunus Ali University (Current, 3.75)"
    ),
    "skills": (
        "Programming: C++, C\n"
        "Core: Algorithms, Problem Solving\n"
        "Other: SEO, Communication"
    ),
    "coding": (
        "Codeforces: 831 rating (RATUL_CSE18)\n"
        "Beecrowd: 448.58 points\n"
        "Profile: codeforces.com/profile/ratul_cse18"
    ),
    "contact": (
        "Phone: +8801786500883\n"
        "Email: ratul1.cse18kyau@gmail.com\n"
        "Location: Shahzadpur, Sirajganj"
    ),
}
# ========== STEP 3: PRE-COMPUTED ANSWERS ==========
# Canned replies assembled once at import time; answer_question serves
# these for keyword hits and only invokes text generation otherwise.
QA = dict(
    # Direct resume lookups (fastest path)
    who=f"I'm {resume_data['name']}, a {resume_data['profession']}.",
    education=resume_data["education"],
    skills=resume_data["skills"],
    code=resume_data["coding"],
    contact=resume_data["contact"],
    # Pattern-based canned replies
    single="I don't share relationship status.",
    age="That's private. Ask about my coding skills!",
    project="Ratul offers affordable projects. Interested?",
    # Default reply when nothing else matches
    fallback="Ask about my education, skills, or coding profiles!",
)
# ========== STEP 4: OPTIMIZED PROCESSING ==========
ABUSIVE_WORDS = {"fuck", "ass", "bitch", "shit", "rape"}  # set for O(1) lookups

def is_abusive(text: str) -> bool:
    """Return True if *text* contains a blocked word as a whole word.

    Matches whole alphabetic tokens rather than raw substrings: the old
    ``word in text`` check flagged innocent words such as "class",
    "assistant" and "assignment" because they contain "ass".
    """
    tokens = re.findall(r"[a-z]+", text.lower())
    return not ABUSIVE_WORDS.isdisjoint(tokens)
def detect_bangla(text: str) -> bool:
    """Return True if *text* contains any character from the Bengali
    Unicode block (U+0980–U+09FF)."""
    for ch in text:
        if '\u0980' <= ch <= '\u09FF':
            return True
    return False
def fast_translate(text: str) -> str:
    """Translate Bangla input to English; pass English text through unchanged."""
    # Guard clause: skip the model entirely for non-Bangla input.
    if not detect_bangla(text):
        return text
    # max_length caps decoder steps to keep latency predictable.
    result = translator(text, max_length=60)
    return result[0]['translation_text']
def generate_response(prompt: str) -> str:
    """Run the language model on *prompt* and return the text after the
    final "Trisha:" marker, stripped of surrounding whitespace."""
    # Greedy decoding — no sampling, single beam — is the fastest option.
    outputs = chatbot(
        prompt,
        max_new_tokens=100,
        do_sample=False,
        num_beams=1,
    )
    generated = outputs[0]['generated_text']
    return generated.rsplit("Trisha:", 1)[-1].strip()
# ========== STEP 5: OPTIMIZED CHAT LOGIC ==========

# (keyword substring, QA key) pairs, checked in order; first hit wins.
_KEYWORD_ROUTES = (
    ("who", "who"),
    ("educat", "education"),
    ("skill", "skills"),
    ("code", "code"),
    ("contact", "contact"),
    ("single", "single"),
    ("age", "age"),
    ("birth", "age"),
    ("project", "project"),
)

def answer_question(question: str) -> str:
    """Answer *question* (English or Bangla) about Ratul's resume.

    Pipeline: translate Bangla to English, screen for abuse, serve a
    pre-computed answer on a keyword hit, otherwise fall back to the
    (slower) text-generation model.
    """
    question = question.strip().lower()
    # Lowercase AFTER translation too: the translator capitalizes its
    # English output (e.g. "Who are you"), which previously made every
    # lowercase keyword check miss for translated Bangla input.
    question_en = fast_translate(question).lower()
    # 1. Safety check
    if is_abusive(question_en):
        return "⚠️ Keep it professional or I'll disconnect!"
    # 2. Check pre-computed answers (fast path)
    for keyword, qa_key in _KEYWORD_ROUTES:
        if keyword in question_en:
            return QA[qa_key]
    # 3. Generate only when necessary (slower path)
    prompt = f"""Trisha (Ratul's Assistant):
Q: {question_en}
A:"""
    return generate_response(prompt)
# ========== STEP 6: LIGHTNING-FAST GRADIO UI ==========
# Minimal single-turn UI: one textbox in, one textbox out; Enter submits.
with gr.Blocks(title="⚡ Trisha Chatbot") as demo:
    gr.Markdown(f"## 🤖 Hi! I'm Trisha, {resume_data['name']}'s assistant")
    with gr.Row():
        user_input = gr.Textbox(label="Ask anything...", placeholder="Type in English/Bangla")
        output = gr.Textbox(label="Response", lines=5)
    # Pressing Enter in the textbox routes the question through
    # answer_question; api_name exposes the handler as the /chat API endpoint.
    user_input.submit(
        fn=answer_question,
        inputs=user_input,
        outputs=output,
        api_name="chat"
    )

if __name__ == "__main__":
    # 7860 is the port Hugging Face Spaces expects a Gradio app to serve on.
    demo.launch(server_port=7860)