# Hugging Face Space app — persona chatbot ("Chat with Kushal Kachari").
import logging
import os

import gradio as gr
import requests
import torch
from dotenv import load_dotenv
from transformers import AutoModelForCausalLM, AutoTokenizer, pipeline
# --------------------------------------------------
# LOAD ENVIRONMENT VARIABLES
# --------------------------------------------------
# override=True lets values in a local .env file take precedence over
# variables already present in the process environment.
load_dotenv(override=True)

# Pushover credentials for push notifications. Either may be None, in
# which case push_notification() below becomes a no-op.
PUSHOVER_TOKEN = os.getenv("PUSHOVER_TOKEN")
PUSHOVER_USER = os.getenv("PUSHOVER_USER")
# --------------------------------------------------
# MODEL CONFIGURATION (QWEN2 - CAUSAL LM)
# --------------------------------------------------
MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct"

# Loaded at import time: first run downloads the weights, so cold starts
# can be slow on Spaces.
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",    # place layers on GPU when available, else CPU
    dtype=torch.float16,  # half precision to reduce memory footprint
)

# NOTE(review): `generator` is built here but never used in this file —
# KushalBot.chat() calls model.generate() directly. Confirm whether the
# pipeline is still needed, or remove it to save startup time.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
# --------------------------------------------------
# FULL SUMMARY AND LINKEDIN PROFILE
# --------------------------------------------------
# Free-text bio injected verbatim into the system prompt
# (see KushalBot.system_prompt).
SUMMARY = """
Kushal Kachari is a passionate AI/ML engineer specializing in Generative AI and LLM applications.
Currently working at TCS on AI Agents and AI Tools projects. Experienced with Python, Streamlit,
Ollama, Azure, and building production-ready AI solutions. Actively learning Transformer
architecture, RAG, fine-tuning, and advanced ML algorithms.
"""

# Resume-style profile text, also injected verbatim into the system
# prompt. NOTE(review): the `β` characters look like mojibake from a
# bullet/dash in the original document — confirm intended characters.
LINKEDIN_PROFILE = """
KUSHAL KACHARI
+91 84730 49979 β Goalpara, Assam, India
kushalkachari993@gmail.com β kushal.kachari@tcs.com
EDUCATION
B.Tech (Computer Science and Engineering), Jorhat Engineering College (2020β2024)
CGPA: 8.38
B.Sc (Programming and Data Science), IIT Madras (2020β2024)
CGPA: 6.5
SKILLS
Python, Java, TensorFlow, Keras, Firebase, DBMS, Business Analysis
EXPERIENCE
AI/ML Engineer β TCS
Internships at NRL, IIIT Guwahati
PROJECTS
β’ Assamese POS Tagging
β’ Multi-Fingerprint Attendance System
β’ Flower Classification Website
HONORS
β’ Runner Up β IIT Madras Data Science Alphathon
β’ Research Consultant β WorldQuant BRAIN
"""
# --------------------------------------------------
# PUSH NOTIFICATION FUNCTION
# --------------------------------------------------
def push_notification(text):
    """Send a best-effort push notification via the Pushover API.

    Does nothing when the Pushover credentials are not configured.
    Network and HTTP errors are logged but never raised, so a failed
    notification can never break the chat flow.

    Args:
        text: Message body to deliver.
    """
    if not (PUSHOVER_TOKEN and PUSHOVER_USER):
        return  # credentials absent -> notifications disabled
    try:
        response = requests.post(
            "https://api.pushover.net/1/messages.json",
            data={
                "token": PUSHOVER_TOKEN,
                "user": PUSHOVER_USER,
                "message": text,
            },
            timeout=5,
        )
        # Previously HTTP error statuses were silently ignored; surface
        # them into the except branch so they at least get logged.
        response.raise_for_status()
    except requests.RequestException:
        # Best-effort only: log instead of silently swallowing (the old
        # `except Exception: pass` hid every failure, including typos).
        logging.warning("Pushover notification failed", exc_info=True)
# --------------------------------------------------
# TOOLS (LOGIC PRESERVED)
# --------------------------------------------------
def record_user_details(email, name="Not provided", notes="Not provided"):
    """Capture a lead's contact details and fire a push notification."""
    lead_summary = f"Lead captured | {email} | {name} | {notes}"
    push_notification(lead_summary)
    return "Thanks! Iβve saved your contact details."
def record_unknown_question(question):
    """Log a question the bot could not answer, for later follow-up."""
    alert = f"Unknown question: {question}"
    push_notification(alert)
    return "Iβve noted your question and will get back to you later."
# --------------------------------------------------
# BOT CLASS
# --------------------------------------------------
class KushalBot:
    """Persona chatbot that answers as Kushal Kachari.

    Wraps the module-level Qwen2 model/tokenizer: builds a chat-formatted
    prompt from the persona system prompt plus recent history, generates a
    completion, and returns only the newly generated text.
    """

    def __init__(self):
        # Persona name interpolated into the system prompt.
        self.name = "Kushal Kachari"

    def system_prompt(self):
        """Return the persona instructions grounded in SUMMARY and profile."""
        return f"""
You are {self.name}, an AI/ML Engineer at TCS.
Answer questions strictly based on the information below.
Be professional, concise, and factual.
If a user shares contact details, acknowledge politely.
If you cannot answer, say so clearly.
## Summary
{SUMMARY}
## Profile
{LINKEDIN_PROFILE}
"""

    def chat(self, message, history):
        """Generate a reply to `message` given the conversation so far.

        Args:
            message: Latest user message.
            history: Gradio messages-style history — list of dicts with
                "role" ("user"/"assistant") and "content" keys.

        Returns:
            The assistant's reply text (newly generated tokens only).

        Fixes over the previous version: system_prompt() was defined but
        never used; the prompt was hand-concatenated instead of using the
        instruct model's chat template; and the reply was recovered with a
        fragile split on "Assistant:" over the full decoded sequence.
        """
        messages = [{"role": "system", "content": self.system_prompt()}]
        # Keep only the most recent 20 turns to bound the prompt length.
        for msg in history[-20:]:
            role = "user" if msg["role"] == "user" else "assistant"
            messages.append({"role": role, "content": msg["content"]})
        messages.append({"role": "user", "content": message})

        # Format with the model's own chat template and append the
        # assistant header so generation continues as the assistant.
        input_ids = tokenizer.apply_chat_template(
            messages,
            add_generation_prompt=True,
            return_tensors="pt",
        ).to(model.device)
        output = model.generate(
            input_ids,
            max_new_tokens=256,
            temperature=0.7,
            do_sample=True,
        )
        # Slice off the prompt so only the new completion is decoded.
        new_tokens = output[0][input_ids.shape[-1]:]
        return tokenizer.decode(new_tokens, skip_special_tokens=True).strip()
# --------------------------------------------------
# GRADIO UI
# --------------------------------------------------
if __name__ == "__main__":
    # Build the bot and serve it behind Gradio's standard chat widget;
    # guarded so importing this module never launches a server.
    assistant = KushalBot()
    demo = gr.ChatInterface(
        fn=assistant.chat,
        title="Chat with Kushal Kachari",
        description="AI/ML Engineer | Qwen2-1.5B | Hugging Face Spaces",
    )
    demo.launch()