import os
import gradio as gr
import requests
import torch
from dotenv import load_dotenv
from transformers import AutoTokenizer, AutoModelForCausalLM, pipeline
# --------------------------------------------------
# LOAD ENVIRONMENT VARIABLES
# --------------------------------------------------
# override=True lets a local .env win over variables already set in the
# process environment (useful on Spaces where secrets are injected).
load_dotenv(override=True)
# Pushover credentials; push_notification() below silently no-ops when
# either of these is missing.
PUSHOVER_TOKEN = os.getenv("PUSHOVER_TOKEN")
PUSHOVER_USER = os.getenv("PUSHOVER_USER")
# --------------------------------------------------
# MODEL CONFIGURATION (QWEN2 – CAUSAL LM)
# --------------------------------------------------
MODEL_NAME = "Qwen/Qwen2-1.5B-Instruct"
tokenizer = AutoTokenizer.from_pretrained(MODEL_NAME)
model = AutoModelForCausalLM.from_pretrained(
    MODEL_NAME,
    device_map="auto",  # let accelerate place layers on GPU/CPU automatically
    dtype=torch.float16,  # NOTE(review): `dtype=` is only accepted by recent transformers; older releases use `torch_dtype=` — confirm pinned version
)
# NOTE(review): this pipeline is created but never used — KushalBot.chat calls
# model.generate directly. Consider removing it or routing chat through it.
generator = pipeline(
    "text-generation",
    model=model,
    tokenizer=tokenizer,
)
# --------------------------------------------------
# FULL SUMMARY AND LINKEDIN PROFILE
# --------------------------------------------------
# Free-text bio injected verbatim into the system prompt. This is runtime
# data, not documentation — edits here change what the bot says about itself.
SUMMARY = """
Kushal Kachari is a passionate AI/ML engineer specializing in Generative AI and LLM applications.
Currently working at TCS on AI Agents and AI Tools projects. Experienced with Python, Streamlit,
Ollama, Azure, and building production-ready AI solutions. Actively learning Transformer
architecture, RAG, fine-tuning, and advanced ML algorithms.
"""
# Plain-text résumé/LinkedIn dump, also injected into the system prompt.
LINKEDIN_PROFILE = """
KUSHAL KACHARI
+91 84730 49979 ⋄ Goalpara, Assam, India
kushalkachari993@gmail.com ⋄ kushal.kachari@tcs.com
EDUCATION
B.Tech (Computer Science and Engineering), Jorhat Engineering College (2020–2024)
CGPA: 8.38
B.Sc (Programming and Data Science), IIT Madras (2020–2024)
CGPA: 6.5
SKILLS
Python, Java, TensorFlow, Keras, Firebase, DBMS, Business Analysis
EXPERIENCE
AI/ML Engineer – TCS
Internships at NRL, IIIT Guwahati
PROJECTS
• Assamese POS Tagging
• Multi-Fingerprint Attendance System
• Flower Classification Website
HONORS
• Runner Up – IIT Madras Data Science Alphathon
• Research Consultant – WorldQuant BRAIN
"""
# --------------------------------------------------
# PUSH NOTIFICATION FUNCTION
# --------------------------------------------------
def push_notification(text):
    """Send *text* to the Pushover API as a best-effort notification.

    Silently does nothing when the PUSHOVER_TOKEN / PUSHOVER_USER env vars
    are not configured. Network failures are swallowed on purpose: a lost
    notification must never break the chat flow.

    Args:
        text: Message body to deliver.
    """
    if PUSHOVER_TOKEN and PUSHOVER_USER:
        try:
            requests.post(
                "https://api.pushover.net/1/messages.json",
                data={
                    "token": PUSHOVER_TOKEN,
                    "user": PUSHOVER_USER,
                    "message": text,
                },
                timeout=5,  # don't let a slow API stall the chat handler
            )
        # BUG FIX: was a blanket `except Exception`, which also hid
        # programming errors (typos, bad kwargs). Only network/HTTP-level
        # failures are expected here, so catch exactly those.
        except requests.RequestException:
            pass
# --------------------------------------------------
# TOOLS (LOGIC PRESERVED)
# --------------------------------------------------
def record_user_details(email, name="Not provided", notes="Not provided"):
    """Capture a lead's contact info, notify via Pushover, and acknowledge."""
    lead_summary = f"Lead captured | {email} | {name} | {notes}"
    push_notification(lead_summary)
    return "Thanks! I’ve saved your contact details."
def record_unknown_question(question):
    """Log a question the bot could not answer and reassure the user."""
    alert = f"Unknown question: {question}"
    push_notification(alert)
    return "I’ve noted your question and will get back to you later."
# --------------------------------------------------
# BOT CLASS
# --------------------------------------------------
class KushalBot:
    """Persona chatbot that answers as Kushal Kachari via the local Qwen2 model."""

    def __init__(self):
        self.name = "Kushal Kachari"

    def system_prompt(self):
        """Return the grounding prompt: persona rules plus summary and profile."""
        return f"""
You are {self.name}, an AI/ML Engineer at TCS.
Answer questions strictly based on the information below.
Be professional, concise, and factual.
If a user shares contact details, acknowledge politely.
If you cannot answer, say so clearly.
## Summary
{SUMMARY}
## Profile
{LINKEDIN_PROFILE}
"""

    def chat(self, message, history):
        """Generate a reply to *message*.

        Args:
            message: The latest user utterance.
            history: Gradio-style list of {"role", "content"} dicts.

        Returns:
            The model's reply text, with the echoed prompt stripped off.
        """
        # BUG FIX: the original hard-coded a one-line persona string and never
        # called system_prompt(), so the model had no access to the summary /
        # profile it was instructed to answer from. Ground the prompt in it.
        prompt = self.system_prompt() + "\n\n"
        # Cap context at the 20 most recent turns to bound prompt length.
        for msg in history[-20:]:
            speaker = "User" if msg["role"] == "user" else "Assistant"
            prompt += f"{speaker}: {msg['content']}\n"
        prompt += f"User: {message}\nAssistant:"
        inputs = tokenizer(prompt, return_tensors="pt").to(model.device)
        output = model.generate(
            **inputs,
            max_new_tokens=256,
            temperature=0.7,
            do_sample=True,
        )
        # decode() returns prompt + completion; keep only the final answer.
        response = tokenizer.decode(output[0], skip_special_tokens=True)
        return response.split("Assistant:")[-1].strip()
# --------------------------------------------------
# GRADIO UI
# --------------------------------------------------
if __name__ == "__main__":
    # Only spin up the web UI when executed as a script, not on import.
    assistant = KushalBot()
    demo = gr.ChatInterface(
        fn=assistant.chat,
        title="Chat with Kushal Kachari",
        description="AI/ML Engineer | Qwen2-1.5B | Hugging Face Spaces",
    )
    demo.launch()