krishnadhulipalla committed on
Commit
102dac3
·
1 Parent(s): 0890936

Updated UI & personal data

Browse files
.gitignore CHANGED
@@ -6,4 +6,5 @@ ui/node_modules/
6
  .DS_Store
7
  *.log
8
  .env
9
- /env
 
 
6
  .DS_Store
7
  *.log
8
  .env
9
+ /env
10
+ /.venv
Vector_storing.py CHANGED
@@ -3,11 +3,16 @@ import re
3
  import json
4
  import hashlib
5
  from pathlib import Path
 
6
 
7
- from langchain.text_splitter import RecursiveCharacterTextSplitter
8
- from langchain_community.vectorstores import FAISS
9
- from langchain_community.embeddings import HuggingFaceEmbeddings
10
 
 
 
 
 
 
11
 
12
  # === UTILS ===
13
  def hash_text(text: str) -> str:
@@ -16,11 +21,11 @@ def hash_text(text: str) -> str:
16
 
17
  # === MAIN FUNCTION ===
18
  def create_faiss_store(
19
- md_dir: str = "./personal_data",
20
  chunk_size: int = 1000,
21
  chunk_overlap: int = 250,
22
- persist_dir: str = "./backend/data/faiss_store",
23
- chunk_save_path: str = "./backend/data/all_chunks.json",
24
  min_chunk_chars: int = 50,
25
  ):
26
  """
@@ -114,11 +119,19 @@ def create_faiss_store(
114
  os.makedirs(save_path, exist_ok=True)
115
 
116
  # Embeddings + FAISS
117
- embeddings = HuggingFaceEmbeddings(
118
- model_name="sentence-transformers/all-MiniLM-L6-v2",
119
- model_kwargs={"device": "cpu"},
120
- encode_kwargs={"normalize_embeddings": True},
121
- )
 
 
 
 
 
 
 
 
122
 
123
  vector_store = FAISS.from_texts(
124
  texts=[c["text"] for c in all_chunks],
@@ -132,17 +145,11 @@ def create_faiss_store(
132
  print(f"📊 Stats → Chunks: {len(all_chunks)} | Avg length: {avg_len:.1f} characters")
133
 
134
  if failed_chunks:
135
- with open("failed_chunks.txt", "w", encoding="utf-8") as f:
136
  for line in failed_chunks:
137
  f.write(line + "\n")
138
  print("📝 Failed chunk IDs saved to failed_chunks.txt")
139
 
140
 
141
  if __name__ == "__main__":
142
- create_faiss_store(
143
- md_dir="./personal_data",
144
- chunk_size=1000,
145
- chunk_overlap=250,
146
- persist_dir="./backend/data/faiss_store",
147
- chunk_save_path="./backend/data/all_chunks.json",
148
- )
 
3
  import json
4
  import hashlib
5
  from pathlib import Path
6
+ import sys
7
 
8
+ # Ensure we can import from backend if running from root
9
+ sys.path.append(str(Path(__file__).resolve().parent))
 
10
 
11
+ from langchain_text_splitters import RecursiveCharacterTextSplitter
12
+ from langchain_community.vectorstores import FAISS
13
+ from langchain_huggingface import HuggingFaceEmbeddings
14
+ from langchain_openai import OpenAIEmbeddings
15
+ from backend import config
16
 
17
  # === UTILS ===
18
  def hash_text(text: str) -> str:
 
21
 
22
  # === MAIN FUNCTION ===
23
  def create_faiss_store(
24
+ md_dir: str = str(config.PERSONAL_DATA_DIR),
25
  chunk_size: int = 1000,
26
  chunk_overlap: int = 250,
27
+ persist_dir: str = str(config.FAISS_PATH.parent), # Save to parent of specific version
28
+ chunk_save_path: str = str(config.CHUNKS_PATH),
29
  min_chunk_chars: int = 50,
30
  ):
31
  """
 
119
  os.makedirs(save_path, exist_ok=True)
120
 
121
  # Embeddings + FAISS
122
+ if config.USE_OPENAI_EMBEDDING:
123
+ print(f"🔹 Using OpenAI Embeddings: {config.EMBEDDING_MODEL_NAME}")
124
+ embeddings = OpenAIEmbeddings(
125
+ model=config.EMBEDDING_MODEL_NAME,
126
+ openai_api_key=config.OPENAI_API_KEY
127
+ )
128
+ else:
129
+ print(f"🔹 Using HuggingFace Embeddings: {config.EMBEDDING_MODEL_NAME}")
130
+ embeddings = HuggingFaceEmbeddings(
131
+ model_name=config.EMBEDDING_MODEL_NAME,
132
+ model_kwargs={"device": "cpu"},
133
+ encode_kwargs={"normalize_embeddings": True},
134
+ )
135
 
136
  vector_store = FAISS.from_texts(
137
  texts=[c["text"] for c in all_chunks],
 
145
  print(f"📊 Stats → Chunks: {len(all_chunks)} | Avg length: {avg_len:.1f} characters")
146
 
147
  if failed_chunks:
148
+ with open(config.FAILED_CHUNKS_PATH, "w", encoding="utf-8") as f:
149
  for line in failed_chunks:
150
  f.write(line + "\n")
151
  print("📝 Failed chunk IDs saved to failed_chunks.txt")
152
 
153
 
154
  if __name__ == "__main__":
155
+ create_faiss_store()
 
 
 
 
 
 
backend/agent.py CHANGED
@@ -18,11 +18,13 @@ from langchain_core.tools import tool
18
  from langchain_openai import ChatOpenAI
19
  from langchain_core.output_parsers import StrOutputParser
20
  # from langchain_core.prompts import ChatPromptTemplate
 
21
  from langchain_huggingface import HuggingFaceEmbeddings
22
  from langchain_community.vectorstores import FAISS
23
  from langchain_community.retrievers import BM25Retriever
24
- from langchain_nvidia_ai_endpoints import ChatNVIDIA
25
  from langgraph.graph.message import add_messages
 
26
  from langgraph.graph import StateGraph, START, END
27
  from langgraph.prebuilt import ToolNode
28
  from langgraph.checkpoint.memory import MemorySaver
@@ -40,22 +42,22 @@ tool_log = logging.getLogger("tools")
40
 
41
  # import function from api.py
42
  from .g_cal import get_gcal_service
43
-
44
- api_key = os.environ.get("NVIDIA_API_KEY")
45
- if not api_key:
46
- raise RuntimeError("🚨 NVIDIA_API_KEY not found in environment!")
47
 
48
  # Constants
49
- FAISS_PATH = "backend/data/faiss_store/v41_1000-250"
50
- CHUNKS_PATH = "backend/data/all_chunks.json"
51
 
52
- # Validate files
53
  if not Path(FAISS_PATH).exists():
54
- raise FileNotFoundError(f"FAISS index not found at {FAISS_PATH}")
55
- if not Path(CHUNKS_PATH).exists():
56
- raise FileNotFoundError(f"Chunks file not found at {CHUNKS_PATH}")
 
 
 
57
 
58
- KRISHNA_BIO = """Krishna Vamsi Dhulipalla completed masters in Computer Science at Virginia Tech, awarded degree in december 2024, with over 3 years of experience across data engineering, machine learning research, and real-time analytics. He specializes in building scalable data systems and intelligent LLM-powered agents, with strong expertise in Python, PyTorch,Langgraph, autogen Hugging Face Transformers, and end-to-end ML pipelines."""
59
 
60
  # Load resources
61
  def load_chunks(path=CHUNKS_PATH) -> List[Dict]:
@@ -63,7 +65,10 @@ def load_chunks(path=CHUNKS_PATH) -> List[Dict]:
63
  return json.load(f)
64
 
65
  def load_faiss(path=FAISS_PATH, model_name="sentence-transformers/all-MiniLM-L6-v2") -> FAISS:
66
- embeddings = HuggingFaceEmbeddings(model_name=model_name)
 
 
 
67
  return FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
68
 
69
  vectorstore = load_faiss()
@@ -72,19 +77,25 @@ all_texts = [chunk["text"] for chunk in all_chunks]
72
  metadatas = [chunk["metadata"] for chunk in all_chunks]
73
  bm25_retriever = BM25Retriever.from_texts(texts=all_texts, metadatas=metadatas)
74
 
75
- K_PER_QUERY = 10 # how many from each retriever
76
- TOP_K = 8 # final results to return
77
- RRF_K = 60 # reciprocal-rank-fusion constant
78
- RERANK_TOP_N = 50 # rerank this many fused hits
79
- MMR_LAMBDA = 0.7 # 0..1 (higher favors query relevance; lower favors diversity)
80
- CE_MODEL = "cross-encoder/ms-marco-MiniLM-L-6-v2"
81
  ALPHA = 0.7
82
 
83
- from sentence_transformers import CrossEncoder
84
- _cross_encoder = CrossEncoder(CE_MODEL)
85
 
86
- embeddings = HuggingFaceEmbeddings(
87
- model_name="sentence-transformers/all-MiniLM-L6-v2",
 
 
 
 
 
 
88
  model_kwargs={"device": "cpu"},
89
  encode_kwargs={"normalize_embeddings": True},
90
  )
@@ -220,12 +231,12 @@ def retriever(query: str) -> list[str]:
220
  return results
221
 
222
  # --- memory globals ---
223
- MEM_FAISS_PATH = os.getenv("MEM_FAISS_PATH", "/data/memory_faiss")
224
  mem_embeddings = embeddings
225
  memory_vs = None
226
  memory_dirty = False
227
  memory_write_count = 0
228
- MEM_AUTOSAVE_EVERY = 20
229
 
230
  def _ensure_memory_vs():
231
  global memory_vs
@@ -479,7 +490,7 @@ def download_resume() -> str:
479
  """
480
  Return a direct download link to Krishna's latest resume PDF.
481
  """
482
- BASE_URL = os.getenv("PUBLIC_BASE_URL", "http://localhost:8080")
483
  url = f"{BASE_URL}/resume/download"
484
  return (
485
  f"Here is Krishna’s latest resume:\n\n"
@@ -501,12 +512,12 @@ def get_portfolio_links() -> str:
501
  "- LinkedIn: https://www.linkedin.com/in/krishnavamsidhulipalla\n"
502
  "\n"
503
  "## 📦 Highlight Repos\n"
504
- "- LangGraph ChatBot: https://github.com/krishna-dhulipalla/LangGraph_ChatBot"
505
  "- Android World agent: https://github.com/krishna-dhulipalla/android_world\n"
506
- "- Gene Co-expression tootl: https://github.com/krishna-dhulipalla/gene_co-expression_tool"
507
  "- ProxyTuNER (cross-domain NER): https://github.com/krishna-creator/ProxytuNER\n"
508
  "- IntelliMeet (decentralized video conf): https://github.com/krishna-creator/SE-Project---IntelliMeet\n"
509
- "- More repos: https://github.com/krishna-dhulipalla?tab=repositories"
510
  "\n"
511
  "## 📚 Publications\n"
512
  "- BIBM 2024 paper: https://www.researchgate.net/publication/387924249_Leveraging_Machine_Learning_for_Predicting_Circadian_Transcription_in_mRNAs_and_lncRNAs\n"
 
18
  from langchain_openai import ChatOpenAI
19
  from langchain_core.output_parsers import StrOutputParser
20
  # from langchain_core.prompts import ChatPromptTemplate
21
+ from langchain_openai import OpenAIEmbeddings
22
  from langchain_huggingface import HuggingFaceEmbeddings
23
  from langchain_community.vectorstores import FAISS
24
  from langchain_community.retrievers import BM25Retriever
25
+ # from langchain_nvidia_ai_endpoints import ChatNVIDIA (removed)
26
  from langgraph.graph.message import add_messages
27
+
28
  from langgraph.graph import StateGraph, START, END
29
  from langgraph.prebuilt import ToolNode
30
  from langgraph.checkpoint.memory import MemorySaver
 
42
 
43
  # import function from api.py
44
  from .g_cal import get_gcal_service
45
+ from . import config
 
 
 
46
 
47
  # Constants
48
+ FAISS_PATH = str(config.FAISS_PATH)
49
+ CHUNKS_PATH = str(config.CHUNKS_PATH)
50
 
51
+ # Validate files (simplified check)
52
  if not Path(FAISS_PATH).exists():
53
+ print(f"⚠️ FAISS index not found at {FAISS_PATH}. Agent may fail if not initialized.")
54
+
55
+ def load_bio():
56
+ if config.BIO_PATH.exists():
57
+ return config.BIO_PATH.read_text(encoding="utf-8")
58
+ return "Krishna's bio is currently unavailable."
59
 
60
+ KRISHNA_BIO = load_bio()
61
 
62
  # Load resources
63
  def load_chunks(path=CHUNKS_PATH) -> List[Dict]:
 
65
  return json.load(f)
66
 
67
  def load_faiss(path=FAISS_PATH, model_name="sentence-transformers/all-MiniLM-L6-v2") -> FAISS:
68
+ if config.USE_OPENAI_EMBEDDING:
69
+ embeddings = OpenAIEmbeddings(model=config.EMBEDDING_MODEL_NAME, openai_api_key=config.OPENAI_API_KEY)
70
+ else:
71
+ embeddings = HuggingFaceEmbeddings(model_name=model_name)
72
  return FAISS.load_local(path, embeddings, allow_dangerous_deserialization=True)
73
 
74
  vectorstore = load_faiss()
 
77
  metadatas = [chunk["metadata"] for chunk in all_chunks]
78
  bm25_retriever = BM25Retriever.from_texts(texts=all_texts, metadatas=metadatas)
79
 
80
+ K_PER_QUERY = config.K_PER_QUERY
81
+ TOP_K = config.TOP_K
82
+ RRF_K = config.RRF_K
83
+ RERANK_TOP_N = config.RERANK_TOP_N
84
+ MMR_LAMBDA = config.MMR_LAMBDA
85
+ CE_MODEL = config.CROSS_ENCODER_MODEL
86
  ALPHA = 0.7
87
 
88
+ # from sentence_transformers import CrossEncoder
89
+ _cross_encoder = None # CrossEncoder(CE_MODEL)
90
 
91
+ if config.USE_OPENAI_EMBEDDING:
92
+ embeddings = OpenAIEmbeddings(
93
+ model=config.EMBEDDING_MODEL_NAME,
94
+ openai_api_key=config.OPENAI_API_KEY
95
+ )
96
+ else:
97
+ embeddings = HuggingFaceEmbeddings(
98
+ model_name=config.EMBEDDING_MODEL_NAME,
99
  model_kwargs={"device": "cpu"},
100
  encode_kwargs={"normalize_embeddings": True},
101
  )
 
231
  return results
232
 
233
  # --- memory globals ---
234
+ MEM_FAISS_PATH = config.MEM_FAISS_PATH
235
  mem_embeddings = embeddings
236
  memory_vs = None
237
  memory_dirty = False
238
  memory_write_count = 0
239
+ MEM_AUTOSAVE_EVERY = config.MEM_AUTOSAVE_EVERY
240
 
241
  def _ensure_memory_vs():
242
  global memory_vs
 
490
  """
491
  Return a direct download link to Krishna's latest resume PDF.
492
  """
493
+ BASE_URL = config.PUBLIC_BASE_URL
494
  url = f"{BASE_URL}/resume/download"
495
  return (
496
  f"Here is Krishna’s latest resume:\n\n"
 
512
  "- LinkedIn: https://www.linkedin.com/in/krishnavamsidhulipalla\n"
513
  "\n"
514
  "## 📦 Highlight Repos\n"
515
+ "- LangGraph ChatBot: https://github.com/krishna-dhulipalla/LangGraph_ChatBot\n"
516
  "- Android World agent: https://github.com/krishna-dhulipalla/android_world\n"
517
+ "- Gene Co-expression tool: https://github.com/krishna-dhulipalla/gene_co-expression_tool\n"
518
  "- ProxyTuNER (cross-domain NER): https://github.com/krishna-creator/ProxytuNER\n"
519
  "- IntelliMeet (decentralized video conf): https://github.com/krishna-creator/SE-Project---IntelliMeet\n"
520
+ "- More repos: https://github.com/krishna-dhulipalla?tab=repositories\n"
521
  "\n"
522
  "## 📚 Publications\n"
523
  "- BIBM 2024 paper: https://www.researchgate.net/publication/387924249_Leveraging_Machine_Learning_for_Predicting_Circadian_Transcription_in_mRNAs_and_lncRNAs\n"
backend/api.py CHANGED
@@ -13,7 +13,8 @@ from pathlib import Path
13
  import json, secrets, urllib.parse, os
14
  from google_auth_oauthlib.flow import Flow
15
  from .g_cal import get_gcal_service
16
- from .g_cal import SCOPES, TOKEN_FILE
 
17
 
18
  # logging + helpers
19
  import logging, os, time
@@ -27,10 +28,9 @@ from .agent import app as lg_app
27
 
28
  api = FastAPI(title="LangGraph Chat API")
29
 
30
- CLIENT_ID = os.getenv("GOOGLE_CLIENT_ID")
31
- CLIENT_SECRET = os.getenv("GOOGLE_CLIENT_SECRET")
32
- BASE_URL_RAW = os.getenv("PUBLIC_BASE_URL", "http://localhost:8000")
33
- BASE_URL = BASE_URL_RAW.rstrip("/") # no trailing slash
34
  REDIRECT_URI = f"{BASE_URL}/oauth/google/callback"
35
 
36
  # CORS (handy during dev; tighten in prod)
@@ -76,7 +76,7 @@ def list_routes():
76
  @api.get("/api/debug/oauth")
77
  def debug_oauth():
78
  return {
79
- "base_url_env": BASE_URL_RAW,
80
  "base_url_effective": BASE_URL,
81
  "redirect_uri_built": REDIRECT_URI,
82
  }
@@ -106,9 +106,9 @@ def google_reset():
106
  @api.get("/debug/env")
107
  def debug_env():
108
  return {
109
- "public_base_url": os.getenv("PUBLIC_BASE_URL"),
110
- "has_google_client_id": bool(os.getenv("GOOGLE_CLIENT_ID")),
111
- "has_google_client_secret": bool(os.getenv("GOOGLE_CLIENT_SECRET")),
112
  "ui_dist_exists": UI_DIST.is_dir(),
113
  "resume_exists": RESUME_PATH.is_file(),
114
  }
@@ -163,11 +163,8 @@ async def _event_stream_with_config(thread_id: str, message: str, request: Reque
163
  yield {"event": "done", "data": "1"}
164
 
165
  # --- Serve built React UI (ui/dist) under the same origin ---
166
- # repo_root = <project>/ ; this file is <project>/backend/api.py
167
- REPO_ROOT = Path(__file__).resolve().parents[1]
168
- UI_DIST = REPO_ROOT / "ui" / "dist"
169
-
170
- RESUME_PATH = REPO_ROOT / "backend" / "assets" / "KrishnaVamsiDhulipalla.pdf"
171
 
172
  @api.get("/resume/download")
173
  def resume_download():
 
13
  import json, secrets, urllib.parse, os
14
  from google_auth_oauthlib.flow import Flow
15
  from .g_cal import get_gcal_service
16
+ from .g_cal import SCOPES, TOKEN_FILE
17
+ from . import config
18
 
19
  # logging + helpers
20
  import logging, os, time
 
28
 
29
  api = FastAPI(title="LangGraph Chat API")
30
 
31
+ CLIENT_ID = config.GOOGLE_CLIENT_ID
32
+ CLIENT_SECRET = config.GOOGLE_CLIENT_SECRET
33
+ BASE_URL = config.PUBLIC_BASE_URL.rstrip("/")
 
34
  REDIRECT_URI = f"{BASE_URL}/oauth/google/callback"
35
 
36
  # CORS (handy during dev; tighten in prod)
 
76
  @api.get("/api/debug/oauth")
77
  def debug_oauth():
78
  return {
79
+ "base_url_env": config.PUBLIC_BASE_URL,
80
  "base_url_effective": BASE_URL,
81
  "redirect_uri_built": REDIRECT_URI,
82
  }
 
106
  @api.get("/debug/env")
107
  def debug_env():
108
  return {
109
+ "public_base_url": config.PUBLIC_BASE_URL,
110
+ "has_google_client_id": bool(config.GOOGLE_CLIENT_ID),
111
+ "has_google_client_secret": bool(config.GOOGLE_CLIENT_SECRET),
112
  "ui_dist_exists": UI_DIST.is_dir(),
113
  "resume_exists": RESUME_PATH.is_file(),
114
  }
 
163
  yield {"event": "done", "data": "1"}
164
 
165
  # --- Serve built React UI (ui/dist) under the same origin ---
166
+ UI_DIST = config.UI_DIST
167
+ RESUME_PATH = config.RESUME_PATH
 
 
 
168
 
169
  @api.get("/resume/download")
170
  def resume_download():
backend/config.py ADDED
@@ -0,0 +1,42 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ from pathlib import Path
3
+ from dotenv import load_dotenv
4
+
5
+ load_dotenv()
6
+
7
+ # Base Paths
8
+ BACKEND_DIR = Path(__file__).resolve().parent
9
+ PROJECT_ROOT = BACKEND_DIR.parent
10
+ PERSONAL_DATA_DIR = PROJECT_ROOT / "personal_data"
11
+ ASSETS_DIR = BACKEND_DIR / "assests" # Copied spelling from existing folder structure
12
+ DATA_DIR = BACKEND_DIR / "data"
13
+
14
+ # Data Paths
15
+ FAISS_PATH = DATA_DIR / "faiss_store" / "v30_1000-250" # Make this dynamic if needed?
16
+ CHUNKS_PATH = DATA_DIR / "all_chunks.json"
17
+ FAILED_CHUNKS_PATH = PROJECT_ROOT / "failed_chunks.txt"
18
+ BIO_PATH = PERSONAL_DATA_DIR / "bio.md"
19
+ RESUME_PATH = ASSETS_DIR / "KrishnaVamsiDhulipalla.pdf"
20
+ UI_DIST = PROJECT_ROOT / "ui" / "dist"
21
+
22
+ # API Keys
23
+ OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
24
+ GOOGLE_CLIENT_ID = os.getenv("GOOGLE_CLIENT_ID")
25
+ GOOGLE_CLIENT_SECRET = os.getenv("GOOGLE_CLIENT_SECRET")
26
+ PUBLIC_BASE_URL = os.getenv("PUBLIC_BASE_URL", "http://localhost:8000")
27
+
28
+ # Embedding Config
29
+ EMBEDDING_MODEL_NAME = "text-embedding-3-small"
30
+ USE_OPENAI_EMBEDDING = True
31
+ CROSS_ENCODER_MODEL = "cross-encoder/ms-marco-MiniLM-L-6-v2"
32
+
33
+ # Retriever Config
34
+ K_PER_QUERY = 6
35
+ TOP_K = 8
36
+ RRF_K = 60
37
+ RERANK_TOP_N = 20
38
+ MMR_LAMBDA = 0.7
39
+
40
+ # Memory
41
+ MEM_FAISS_PATH = os.getenv("MEM_FAISS_PATH", str(DATA_DIR / "memory_faiss"))
42
+ MEM_AUTOSAVE_EVERY = 20
backend/data/all_chunks.json CHANGED
@@ -1,262 +1,112 @@
1
  [
2
  {
3
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\nI’m a **Machine Learning Engineer** with over **3 years of experience** designing and deploying intelligent AI systems, integrating backend infrastructure, and building real-time data workflows. I specialize in **LLM-powered agents**, **semantic search**, **bioinformatics AI models**, and **cloud-native ML infrastructure**.\n\nI earned my **M.S. in Computer Science** from **Virginia Tech** in December 2024 with a 3.95/4.0 GPA, focusing on large language models, intelligent agents, and scalable data systems. My work spans the full ML lifecycle—from research and fine-tuning transformer architectures to deploying production-ready applications on AWS and GCP.\n\nI’m passionate about **LLM-driven systems**, **multi-agent orchestration**, and **domain-adaptive ML**, particularly in **genomic data analysis** and **real-time analytics**.\n\n---",
4
  "metadata": {
5
- "source": "aprofile.md",
6
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
7
- "chunk_id": "aprofile.md_#0_c786e01b",
8
- "has_header": true,
9
- "word_count": 113
10
- }
11
- },
12
- {
13
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# # 🎯 Career Summary\n\n- 👨‍💻 3+ years of experience in **ML systems design**, **LLM-powered applications**, and **data engineering**\n- 🧬 Proven expertise in **transformer fine-tuning** (LoRA, soft prompting) for genomic classification\n- 🤖 Skilled in **LangChain**, **LangGraph**, **AutoGen**, and **CrewAI** for intelligent agent workflows\n- ☁️ Deep knowledge of **AWS** (S3, Glue, Lambda, SageMaker, ECS, CloudWatch) and **GCP** (BigQuery, Dataflow, Composer)\n- ⚡ Experienced in **real-time data pipelines** using **Apache Kafka**, **Spark**, **Airflow**, and **dbt**\n- 📊 Strong foundation in **synthetic data generation**, **domain adaptation**, and **cross-domain NER**",
14
- "metadata": {
15
- "source": "aprofile.md",
16
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
17
- "chunk_id": "aprofile.md_#1_0ba34e9a",
18
- "has_header": true,
19
- "word_count": 90
20
- }
21
- },
22
- {
23
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# # 🔭 Areas of Current Focus\n\n- Developing **LLM-powered mobile automation agents** for UI task execution\n- Architecting **retrieval-augmented generation (RAG)** systems with hybrid retrieval and cross-encoder reranking\n- Fine-tuning **DNA foundation models** like DNABERT & HyenaDNA for plant genomics\n- Building **real-time analytics pipelines** integrating Kafka, Spark, Airflow, and cloud services\n\n---\n\n# # 🎓 Education\n\n## # Virginia Tech — M.S. in Computer Science\n\n📍 Blacksburg, VA | Jan 2023 – Dec 2024 \n**GPA:** 3.95 / 4.0 \nRelevant Coursework: Distributed Systems, Machine Learning Optimization, Genomics, LLMs & Transformer Architectures\n\n## # Anna University — B.Tech in Computer Science and Engineering\n\n📍 Chennai, India | Jun 2018 – May 2022 \n**GPA:** 8.24 / 10 \nSpecialization: Real-Time Analytics, Cloud Systems, Software Engineering Principles\n\n---",
24
- "metadata": {
25
- "source": "aprofile.md",
26
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
27
- "chunk_id": "aprofile.md_#2_5b08eda4",
28
- "has_header": true,
29
- "word_count": 125
30
- }
31
- },
32
- {
33
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# # 🛠️ Technical Skills\n\n**Programming:** Python, R, SQL, JavaScript, TypeScript, Node.js, FastAPI, MongoDB \n**ML Frameworks:** PyTorch, TensorFlow, scikit-learn, Hugging Face Transformers \n**LLM & Agents:** LangChain, LangGraph, AutoGen, CrewAI, Prompt Engineering, RAG, LoRA, GANs \n**ML Techniques:** Self-Supervised Learning, Cross-Domain Adaptation, Hyperparameter Optimization, A/B Testing \n**Data Engineering:** Apache Spark, Kafka, dbt, Airflow, ETL Pipelines, Delta Lake, Snowflake \n**Cloud & Infra:** AWS (S3, Glue, Lambda, Redshift, ECS, SageMaker, CloudWatch), GCP (GCS, BigQuery, Dataflow, Composer) \n**DevOps/MLOps:** Docker, Kubernetes, MLflow, CI/CD, Weights & Biases \n**Visualization:** Tableau, Shiny (R), Plotly, Matplotlib \n**Other Tools:** Pandas, NumPy, Git, LangSmith, LangFlow, Linux\n\n---",
34
- "metadata": {
35
- "source": "aprofile.md",
36
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
37
- "chunk_id": "aprofile.md_#3_2035a7b6",
38
- "has_header": true,
39
- "word_count": 95
40
- }
41
- },
42
- {
43
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# # 💼 Professional Experience\n\n## # Cloud Systems LLC — ML Research Engineer (Current role)\n\n📍 Remote | Jul 2024 – Present\n\n- Designed and optimized **SQL-based data retrieval** and **batch + real-time pipelines**\n- Built automated **ETL workflows** integrating multiple data sources\n\n## # Virginia Tech — ML Research Engineer\n\n📍 Blacksburg, VA | Sep 2024 – Jul 2024\n\n- Developed **DNA sequence classification pipelines** using DNABERT & HyenaDNA with LoRA & soft prompting (94%+ accuracy)\n- Automated preprocessing of **1M+ genomic sequences** with Biopython & Airflow, reducing runtime by 40%\n- Built **LangChain-based semantic search** for genomics literature\n- Deployed fine-tuned LLMs using Docker, MLflow, and optionally SageMaker",
44
- "metadata": {
45
- "source": "aprofile.md",
46
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
47
- "chunk_id": "aprofile.md_#4_6b27454e",
48
- "has_header": true,
49
- "word_count": 111
50
- }
51
- },
52
- {
53
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n## # Virginia Tech — Research Assistant\n\n📍 Blacksburg, VA | Jun 2023 – May 2024\n\n- Built **genomic ETL pipelines** (Airflow + AWS Glue) improving research data availability by 50%\n- Automated retraining workflows via CI/CD, reducing manual workload by 40%\n- Benchmarked compute cluster performance to cut runtime costs by 15%\n\n## # UJR Technologies Pvt Ltd — Data Engineer\n\n📍 Hyderabad, India | Jul 2021 – Dec 2022\n\n- Migrated **batch ETL to real-time streaming** with Kafka & Spark (↓ latency 30%)\n- Deployed Dockerized microservices to AWS ECS, improving deployment speed by 25%\n- Optimized Snowflake schemas to improve query performance by 40%\n\n---",
54
- "metadata": {
55
- "source": "aprofile.md",
56
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
57
- "chunk_id": "aprofile.md_#5_0002cd5a",
58
- "has_header": true,
59
- "word_count": 108
60
- }
61
- },
62
- {
63
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# # 📊 Highlight Projects\n\n- **LLM-Based Android Agent** – Multi-step UI automation with memory, self-reflection, and context recovery (80%+ accuracy)\n\n## # Real-Time IoT-Based Temperature Forecasting\n\n- Kafka-based pipeline for 10K+ sensor readings with LLaMA 2-based time series model (91% accuracy)\n- Airflow + Looker dashboards (↓ manual reporting by 30%)\n- S3 lifecycle policies saved 40% storage cost with versioned backups \n 🔗 [GitHub](https://github.com/krishna-creator/Real-Time-IoT-Based-Temperature-Analytics-and-Forecasting)\n\n## # Proxy TuNER: Cross-Domain NER\n\n- Developed a proxy tuning method for domain-agnostic BERT\n- 15% generalization gain using gradient reversal + feature alignment\n- 70% cost reduction via logit-level ensembling \n 🔗 [GitHub](https://github.com/krishna-creator/ProxytuNER)",
64
- "metadata": {
65
- "source": "aprofile.md",
66
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
67
- "chunk_id": "aprofile.md_#6_48d0bbf0",
68
- "has_header": true,
69
- "word_count": 99
70
- }
71
- },
72
- {
73
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n## # IntelliMeet: AI-Powered Conferencing\n\n- Federated learning, end-to-end encrypted platform\n- Live attention detection using RetinaFace (<200ms latency)\n- Summarization with Transformer-based speech-to-text \n 🔗 [GitHub](https://github.com/krishna-creator/SE-Project---IntelliMeet)\n\n## # Automated Drone Image Analysis\n\n- Real-time crop disease detection using drone imagery\n- Used OpenCV, RAG, and GANs for synthetic data generation\n- Improved detection accuracy by 15% and reduced processing latency by 70%\n\n---",
74
- "metadata": {
75
- "source": "aprofile.md",
76
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
77
- "chunk_id": "aprofile.md_#7_65108870",
78
- "has_header": true,
79
- "word_count": 63
80
- }
81
- },
82
- {
83
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# # 📜 Certifications\n\n- 🏆 NVIDIA – Building RAG Agents with LLMs\n- 🏆 Google Cloud – Data Engineering Foundations\n- 🏆 AWS – Machine Learning Specialty\n- 🏆 Microsoft – MERN Stack Development\n- 🏆 Snowflake – End-to-End Data Engineering\n- 🏆 Coursera – Machine Learning Specialization \n 🔗 [View All Credentials](https://www.linkedin.com/in/krishnavamsidhulipalla/)\n\n---\n\n# # 📚 Research Publications\n\n- **IEEE BIBM 2024** – “Leveraging ML for Predicting Circadian Transcription in mRNAs and lncRNAs” \n [DOI: 10.1109/BIBM62325.2024.10822684](https://doi.org/10.1109/BIBM62325.2024.10822684)\n\n- **MLCB** – “Harnessing DNA Foundation Models for TF Binding Prediction in Plants”\n\n---",
84
- "metadata": {
85
- "source": "aprofile.md",
86
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
87
- "chunk_id": "aprofile.md_#8_86fd643f",
88
- "has_header": true,
89
- "word_count": 90
90
- }
91
- },
92
- {
93
- "text": "[HEADER] # 👋 Hello, I'm Krishna Vamsi Dhulipalla\n\n# # 🔗 External Links / Contact details\n\n- 🌐 [Personal Portfolio/ personal website](http://krishna-dhulipalla.github.io)\n- 🧪 [GitHub](https://github.com/Krishna-dhulipalla)\n- 💼 [LinkedIn](https://www.linkedin.com/in/krishnavamsidhulipalla)\n- 📬 dhulipallakrishnavamsi@gmail.com\n- 🤖 [Personal Chatbot](https://huggingface.co/spaces/krishnadhulipalla/Personal_ChatBot)",
94
- "metadata": {
95
- "source": "aprofile.md",
96
- "header": "# 👋 Hello, I'm Krishna Vamsi Dhulipalla",
97
- "chunk_id": "aprofile.md_#9_cf15266e",
98
- "has_header": true,
99
- "word_count": 27
100
- }
101
- },
102
- {
103
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\nThis document outlines the technical architecture and modular design of Krishna Vamsi Dhulipalla’s personal AI chatbot system, implemented using **LangChain**, **OpenAI**, **NVIDIA NIMs**, and **Gradio**. The assistant is built for intelligent, retriever-augmented, memory-aware interaction tailored to Krishna’s background and user context.\n\n---",
104
- "metadata": {
105
- "source": "Chatbot_Architecture_Notes.md",
106
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
107
- "chunk_id": "Chatbot_Architecture_Notes.md_#0_26c9c16b",
108
- "has_header": true,
109
- "word_count": 55
110
- }
111
- },
112
- {
113
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n| Purpose | Model Name | Role Description |\n| ----------------------------------- | ---------------------------------------- | ---------------------------------------------------------------- |\n| **Rephraser LLM** | `microsoft/phi-3-mini-4k-instruct` | Rewrites vague/short queries into detailed, keyword-rich queries |\n| **Relevance Classifier + Reranker** | `mistralai/mixtral-8x22b-instruct-v0.1` | Classifies query relevance to KB and reranks retrieved chunks |\n| **Answer Generator** | `nvidia/llama-3.1-nemotron-70b-instruct` | Provides rich, structured answers (replacing GPT-4o for testing) |\n| **Fallback Humor Model** | `mistralai/mixtral-8x22b-instruct-v0.1` | Responds humorously and redirects when out-of-scope |",
114
- "metadata": {
115
- "source": "Chatbot_Architecture_Notes.md",
116
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
117
- "chunk_id": "Chatbot_Architecture_Notes.md_#3_07e89ce2",
118
  "has_header": false,
119
- "word_count": 77
120
  }
121
  },
122
  {
123
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n| **Fallback Humor Model** | `mistralai/mixtral-8x22b-instruct-v0.1` | Responds humorously and redirects when out-of-scope |\n| **KnowledgeBase Updater** | `mistralai/mistral-7b-instruct-v0.3` | Extracts and updates structured memory about the user |",
124
  "metadata": {
125
  "source": "Chatbot_Architecture_Notes.md",
126
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
127
- "chunk_id": "Chatbot_Architecture_Notes.md_#4_c44438ef",
128
- "has_header": false,
129
- "word_count": 29
130
- }
131
- },
132
- {
133
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\nAll models are integrated via **LangChain RunnableChains**, supporting both streaming and structured execution.\n\n---",
134
- "metadata": {
135
- "source": "Chatbot_Architecture_Notes.md",
136
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
137
- "chunk_id": "Chatbot_Architecture_Notes.md_#5_ba043e37",
138
- "has_header": false,
139
- "word_count": 14
140
- }
141
- },
142
- {
143
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🔍 Retrieval Architecture\n\n## # ✅ **Hybrid Retrieval System**\n\nThe assistant combines:\n\n- **BM25Retriever**: Lexical keyword match\n- **FAISS Vector Search**: Dense embeddings from `sentence-transformers/all-MiniLM-L6-v2`\n\n## # 🧠 Rephrasing for Retrieval\n\n- The **user's query** is expanded using the Rephraser LLM, with awareness of `last_followups` and memory\n- **Rewritten query** is used throughout retrieval, validation, and reranking\n\n## # 📊 Scoring & Ranking\n\n- Each subquery is run through both BM25 and FAISS\n- Results are merged via weighted formula: \n `final_score = α * vector_score + (1 - α) * bm25_score`\n- Deduplication via fingerprinting\n- Top-k (default: 15) results are passed forward\n\n---",
144
- "metadata": {
145
- "source": "Chatbot_Architecture_Notes.md",
146
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
147
- "chunk_id": "Chatbot_Architecture_Notes.md_#6_345d4daa",
148
- "has_header": true,
149
- "word_count": 106
150
- }
151
- },
152
- {
153
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🔎 Validation + Chunk Reranking\n\n## # 🔍 Relevance Classification\n\n- LLM2 evaluates:\n - Whether the query (or rewritten query) is **in-scope**\n - If so, returns a **reranked list of chunk indices**\n- Memory (`last_input`, `last_output`, `last_followups`) and `rewritten_query` are included for better context\n\n## # ❌ If Out-of-Scope\n\n- Chunks are discarded\n- Response is generated using fallback LLM with humor and redirection\n\n---\n\n# # 🧠 Memory + Personalization\n\n## # 📘 KnowledgeBase Model\n\nTracks structured user data:\n\n- `user_name`, `company`, `last_input`, `last_output`\n- `summary_history`, `recent_interests`, `last_followups`, `tone`\n\n## # 🔄 Memory Updates\n\n- After every response, assistant extracts and updates memory\n- Handled via `RExtract` pipeline using `PydanticOutputParser` and KB LLM\n\n---",
154
- "metadata": {
155
- "source": "Chatbot_Architecture_Notes.md",
156
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
157
- "chunk_id": "Chatbot_Architecture_Notes.md_#7_9aedb3ef",
158
  "has_header": true,
159
- "word_count": 117
160
  }
161
  },
162
  {
163
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🧭 Orchestration Flow\n\n```text\nUser Input\n ↓\nRephraser LLM (phi-3-mini)\n ↓\nHybrid Retrieval (BM25 + FAISS)\n ↓\nValidation + Reranking (mixtral-8x22b)\n ↓\n ┌──────────────┐ ┌────────────────────┐\n In-Scope │ │ Out-of-Scope Query │\n (Top-k Chunks)│ │ (Memory-based only)│\n └────┬─────────┘ └─────────────┬──────┘\n ↓ ↓\n Answer LLM (nemotron-70b) Fallback Humor LLM\n```\n\n---\n\n# # 💬 Frontend Interface (Gradio)\n\n- Built using **Gradio ChatInterface + Blocks**\n- Features:\n - Responsive design\n - Custom CSS\n - Streaming markdown responses\n - Preloaded examples and auto-scroll\n\n---",
164
  "metadata": {
165
  "source": "Chatbot_Architecture_Notes.md",
166
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
167
- "chunk_id": "Chatbot_Architecture_Notes.md_#8_9eb3379f",
168
  "has_header": true,
169
- "word_count": 82
170
- }
171
- },
172
- {
173
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 💬 Frontend Interface (Gradio)\n\n- Built using **Gradio ChatInterface + Blocks**\n- Features:\n - Responsive design\n - Custom CSS\n - Streaming markdown responses\n - Preloaded examples and auto-scroll\n\n---\n\n# # 🧩 Additional Design Highlights\n\n- **Streaming**: Nemotron-70B used via LangChain streaming\n- **Prompt Engineering**: Answer prompts use markdown formatting, section headers, bullet points, and personalized sign-offs\n- **Memory-Aware Rewriting**: Handles vague replies like `\"yes\"` or `\"A\"` by mapping them to `last_followups`\n- **Knowledge Chunk Enrichment**: Each FAISS chunk includes synthetic summary and 3 QA-style synthetic queries\n\n---",
174
- "metadata": {
175
- "source": "Chatbot_Architecture_Notes.md",
176
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
177
- "chunk_id": "Chatbot_Architecture_Notes.md_#9_57d88724",
178
- "has_header": true,
179
- "word_count": 90
180
  }
181
  },
182
  {
183
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🚀 Future Enhancements\n\n- Tool calling for tasks like calendar access or Google search\n- Multi-model reranking agents\n- Memory summarization agents for long dialogs\n- Topic planners to group conversations\n- Retrieval filtering based on user interest and session\n\n---\n\nThis architecture is modular, extensible, and designed to simulate a memory-grounded, expert-aware personal assistant tailored to Krishna’s evolving knowledge and conversational goals.\n\n# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (LangGraph Version) (New and current one)\n\nThis document details the updated architecture of **Krishna Vamsi Dhulipalla’s** personal AI assistant, now fully implemented with **LangGraph** for orchestrated state management and tool execution. The system is designed for **retrieval-augmented, memory-grounded, and multi-turn conversational intelligence**, integrating **OpenAI GPT-4o**, **Hugging Face embeddings**, and **cross-encoder reranking**.\n\n---",
184
  "metadata": {
185
  "source": "Chatbot_Architecture_Notes.md",
186
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
187
- "chunk_id": "Chatbot_Architecture_Notes.md_#10_480e8b80",
188
  "has_header": true,
189
- "word_count": 126
190
  }
191
  },
192
  {
193
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🧱 Core Components\n\n## # 1. **Models & Their Roles**\n\n| Purpose | Model Name | Role Description |\n| -------------------------- | ---------------------------------------- | ------------------------------------------------ |\n| **Main Chat Model** | `gpt-4o` | Handles conversation, tool calls, and reasoning |\n| **Retriever Embeddings** | `sentence-transformers/all-MiniLM-L6-v2` | Embedding generation for FAISS vector search |\n| **Cross-Encoder Reranker** | `cross-encoder/ms-marco-MiniLM-L-6-v2` | Reranks retrieval results for semantic relevance |\n| **BM25 Retriever** | (LangChain BM25Retriever) | Keyword-based search complementing vector search |\n\nAll models are bound to LangGraph **StateGraph** nodes for structured execution.\n\n---",
194
  "metadata": {
195
  "source": "Chatbot_Architecture_Notes.md",
196
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
197
- "chunk_id": "Chatbot_Architecture_Notes.md_#11_eb402d95",
198
  "has_header": true,
199
- "word_count": 93
200
  }
201
  },
202
  {
203
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🔍 Retrieval System\n\n## # **Hybrid Retrieval**\n\n- **FAISS Vector Search** with normalized embeddings\n- **BM25Retriever** for lexical keyword matching\n- Combined using **Reciprocal Rank Fusion (RRF)**\n\n## # 📊 **Reranking & Diversity**\n\n1. Initial retrieval with FAISS & BM25 (top-K per retriever)\n2. Fusion via RRF scoring\n3. **Cross-Encoder reranking** (top-N candidates)\n4. **Maximal Marginal Relevance (MMR)** selection for diversity\n\n## # 🔎 Retriever Tool (`@tool retriever`)\n\n- Returns top passages with minimal duplication\n- Used in-system prompt to fetch accurate facts about Krishna\n\n---",
204
  "metadata": {
205
  "source": "Chatbot_Architecture_Notes.md",
206
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
207
- "chunk_id": "Chatbot_Architecture_Notes.md_#12_cab54fdc",
208
  "has_header": true,
209
- "word_count": 89
210
  }
211
  },
212
  {
213
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🧠 Memory System\n\n## # Long-Term Memory\n\n- **FAISS-based memory vector store** stored at `backend/data/memory_faiss`\n- Stores conversation summaries per thread ID\n\n## # Memory Search Tool (`@tool memory_search`)\n\n- Retrieves relevant conversation snippets by semantic similarity\n- Supports **thread-scoped** search for contextual continuity\n\n## # Memory Write Node\n\n- After each AI response, stores `[Q]: ... [A]: ...` summary\n- Autosaves after every `MEM_AUTOSAVE_EVERY` turns or on thread end\n\n---",
214
  "metadata": {
215
  "source": "Chatbot_Architecture_Notes.md",
216
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
217
- "chunk_id": "Chatbot_Architecture_Notes.md_#13_b0899bfc",
218
  "has_header": true,
219
- "word_count": 73
220
  }
221
  },
222
  {
223
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🧭 Orchestration Flow (LangGraph)\n\n```mermaid\ngraph TD\n A[START] --> B[agent node]\n B -->|tool call| C[tools node]\n B -->|no tool| D[memory_write]\n C --> B\n D --> E[END]\n```\n\n## # **Nodes**:\n\n- **agent**: Calls main LLM with conversation window + system prompt\n- **tools**: Executes retriever or memory search tools\n- **memory_write**: Persists summaries to long-term memory\n\n## # **Conditional Edges**:\n\n- From **agent** → `tools` if tool call detected\n- From **agent** → `memory_write` if no tool call\n\n---\n\n# # 💬 System Prompt\n\nThe assistant:\n\n- Uses retriever and memory search tools to gather facts about Krishna\n- Avoids fabrication and requests clarification when needed\n- Responds humorously when off-topic but steers back to Krishna’s expertise\n- Formats with Markdown, headings, and bullet points\n\nEmbedded **Krishna’s Bio** provides static grounding context.\n\n---",
224
  "metadata": {
225
- "source": "Chatbot_Architecture_Notes.md",
226
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
227
- "chunk_id": "Chatbot_Architecture_Notes.md_#14_c58f0c4c",
228
  "has_header": true,
229
- "word_count": 135
230
  }
231
  },
232
  {
233
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🌐 API & Streaming\n\n- **Backend**: FastAPI (`backend/api.py`)\n - `/chat` SSE endpoint streams tokens in real-time\n - Passes `thread_id` & `is_final` to LangGraph for stateful conversations\n- **Frontend**: React + Tailwind (custom chat UI)\n - Threaded conversation storage in browser `localStorage`\n - Real-time token rendering via `EventSource`\n - Features: new chat, clear chat, delete thread, suggestions\n\n---\n\n# # 🖥️ Frontend Highlights\n\n- Dark theme ChatGPT-style UI\n- Sidebar for thread management\n- Live streaming responses with Markdown rendering\n- Suggestion prompts for quick interactions\n- Message actions: copy, edit, regenerate\n\n---",
234
  "metadata": {
235
- "source": "Chatbot_Architecture_Notes.md",
236
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
237
- "chunk_id": "Chatbot_Architecture_Notes.md_#15_07f432c1",
238
  "has_header": true,
239
- "word_count": 94
240
  }
241
  },
242
  {
243
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🖥️ Frontend Highlights\n\n- Dark theme ChatGPT-style UI\n- Sidebar for thread management\n- Live streaming responses with Markdown rendering\n- Suggestion prompts for quick interactions\n- Message actions: copy, edit, regenerate\n\n---\n\n# # 🧩 Design Improvements Over Previous Version\n\n- **LangGraph StateGraph** ensures explicit control of message flow\n- **Thread-scoped memory** enables multi-session personalization\n- **Hybrid RRF + Cross-Encoder + MMR** retrieval pipeline improves relevance & diversity\n- **SSE streaming** for low-latency feedback\n- Decoupled **retrieval** and **memory** as separate tools for modularity\n\n---",
244
  "metadata": {
245
- "source": "Chatbot_Architecture_Notes.md",
246
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
247
- "chunk_id": "Chatbot_Architecture_Notes.md_#16_0247e9ee",
248
  "has_header": true,
249
- "word_count": 88
250
  }
251
  },
252
  {
253
- "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)\n\n# # 🚀 Future Enhancements\n\n- Integrate **tool calling** for external APIs (calendar, search)\n- Summarization agents for condensing memory store\n- Interest-based retrieval filtering\n- Multi-agent orchestration for complex tasks\n\n---\n\nThis LangGraph-powered architecture delivers a **stateful, retrieval-augmented, memory-aware personal assistant** optimized for Krishna’s profile and designed for **extensibility, performance, and precision**.",
254
  "metadata": {
255
- "source": "Chatbot_Architecture_Notes.md",
256
- "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)",
257
- "chunk_id": "Chatbot_Architecture_Notes.md_#17_328ca9e7",
258
  "has_header": true,
259
- "word_count": 53
260
  }
261
  },
262
  {
@@ -379,6 +229,46 @@
379
  "word_count": 121
380
  }
381
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
382
  {
383
  "text": "[HEADER] ## 🧗‍♂️ Hobbies & Passions\n\n## 🧗‍♂️ Hobbies & Passions\n\nHere’s what keeps me energized and curious outside of work:\n\n- **🥾 Hiking & Outdoor Adventures** — Nothing clears my mind like a good hike.\n- **🎬 Marvel Fan for Life** — I’ve seen every Marvel movie, and I’d probably give my life for the MCU (Team Iron Man, always).\n- **🏏 Cricket Enthusiast** — Whether it's IPL or gully cricket, I'm all in.\n- **🚀 Space Exploration Buff** — Obsessed with rockets, Mars missions, and the future of interplanetary travel.\n- **🍳 Cooking Explorer** — I enjoy experimenting with recipes, especially fusion dishes.\n- **🕹️ Gaming & Reverse Engineering** — I love diving into game logic and breaking things down just to rebuild them better.\n- **🧑‍🤝‍🧑 Time with Friends** — Deep conversations, spontaneous trips, or chill evenings—friends keep me grounded.\n\n---",
384
  "metadata": {
 
1
  [
2
  {
3
+ "text": "[HEADER] Krishna Vamsi Dhulipalla is a Software Engineer specializing in generic workflows and AI platforms. He currently works at **Cloud Systems LLC**, where he architects LangGraph-based agents to automate data auditing. Previously, he served as a Machine Learning Engineer at **Virginia Tech**, optimizing genomic models with LoRA/soft prompting, and as a Software Engineer at **UJR Technologies**, building ML SDKs and CI/CD pipelines. He holds an M.S. in Computer Science from Virginia Tech (Dec 2024) and has significant expertise in **LangGraph**, **Kubernetes**, **PyTorch**, and **MLOps**.\n\nKrishna Vamsi Dhulipalla is a Software Engineer specializing in generic workflows and AI platforms. He currently works at **Cloud Systems LLC**, where he architects LangGraph-based agents to automate data auditing. Previously, he served as a Machine Learning Engineer at **Virginia Tech**, optimizing genomic models with LoRA/soft prompting, and as a Software Engineer at **UJR Technologies**, building ML SDKs and CI/CD pipelines. He holds an M.S. in Computer Science from Virginia Tech (Dec 2024) and has significant expertise in **LangGraph**, **Kubernetes**, **PyTorch**, and **MLOps**.",
4
  "metadata": {
5
+ "source": "bio.md",
6
+ "header": "Krishna Vamsi Dhulipalla is a Software Engineer specializing in generic workflows and AI platforms. He currently works at **Cloud Systems LLC**, where he architects LangGraph-based agents to automate data auditing. Previously, he served as a Machine Learning Engineer at **Virginia Tech**, optimizing genomic models with LoRA/soft prompting, and as a Software Engineer at **UJR Technologies**, building ML SDKs and CI/CD pipelines. He holds an M.S. in Computer Science from Virginia Tech (Dec 2024) and has significant expertise in **LangGraph**, **Kubernetes**, **PyTorch**, and **MLOps**.",
7
+ "chunk_id": "bio.md_#0_9ac3944c",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
8
  "has_header": false,
9
+ "word_count": 83
10
  }
11
  },
12
  {
13
+ "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant\n\n# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant\n\nThis document details the architecture of **Krishna Vamsi Dhulipalla’s** personal AI assistant, implemented with **LangGraph** for orchestrated state management and tool execution. The system is designed for **retrieval-augmented, memory-grounded, and multi-turn conversational intelligence**, integrating **OpenAI GPT-4o**, **Hugging Face embeddings**, and **cross-encoder reranking**.\n\n---",
14
  "metadata": {
15
  "source": "Chatbot_Architecture_Notes.md",
16
+ "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant",
17
+ "chunk_id": "Chatbot_Architecture_Notes.md_#0_f30fc781",
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
18
  "has_header": true,
19
+ "word_count": 52
20
  }
21
  },
22
  {
23
+ "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant\n\n# # 🧱 Core Components\n\n## # 1. **Models & Their Roles**\n\n| Purpose | Model Name | Role Description |\n| -------------------------- | ---------------------------------------- | ------------------------------------------------ |\n| **Main Chat Model** | `gpt-4o` | Handles conversation, tool calls, and reasoning |\n| **Retriever Embeddings** | `sentence-transformers/all-MiniLM-L6-v2` | Embedding generation for FAISS vector search |\n| **Cross-Encoder Reranker** | `cross-encoder/ms-marco-MiniLM-L-6-v2` | Reranks retrieval results for semantic relevance |\n| **BM25 Retriever** | (LangChain BM25Retriever) | Keyword-based search complementing vector search |\n\nAll models are bound to LangGraph **StateGraph** nodes for structured execution.\n\n---",
24
  "metadata": {
25
  "source": "Chatbot_Architecture_Notes.md",
26
+ "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant",
27
+ "chunk_id": "Chatbot_Architecture_Notes.md_#1_eb402d95",
28
  "has_header": true,
29
+ "word_count": 93
 
 
 
 
 
 
 
 
 
 
30
  }
31
  },
32
  {
33
+ "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant\n\n# # 🔍 Retrieval System\n\n## # **Hybrid Retrieval**\n\n- **FAISS Vector Search** with normalized embeddings\n- **BM25Retriever** for lexical keyword matching\n- Combined using **Reciprocal Rank Fusion (RRF)**\n\n## # 📊 **Reranking & Diversity**\n\n1. Initial retrieval with FAISS & BM25 (top-K per retriever)\n2. Fusion via RRF scoring\n3. **Cross-Encoder reranking** (top-N candidates)\n4. **Maximal Marginal Relevance (MMR)** selection for diversity\n\n## # 🔎 Retriever Tool (`@tool retriever`)\n\n- Returns top passages with minimal duplication\n- Used in-system prompt to fetch accurate facts about Krishna\n\n---",
34
  "metadata": {
35
  "source": "Chatbot_Architecture_Notes.md",
36
+ "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant",
37
+ "chunk_id": "Chatbot_Architecture_Notes.md_#2_cab54fdc",
38
  "has_header": true,
39
+ "word_count": 89
40
  }
41
  },
42
  {
43
+ "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant\n\n# # 🧠 Memory System\n\n## # Long-Term Memory\n\n- **FAISS-based memory vector store** stored at `backend/data/memory_faiss`\n- Stores conversation summaries per thread ID\n\n## # Memory Search Tool (`@tool memory_search`)\n\n- Retrieves relevant conversation snippets by semantic similarity\n- Supports **thread-scoped** search for contextual continuity\n\n## # Memory Write Node\n\n- After each AI response, stores `[Q]: ... [A]: ...` summary\n- Autosaves after every `MEM_AUTOSAVE_EVERY` turns or on thread end\n\n---",
44
  "metadata": {
45
  "source": "Chatbot_Architecture_Notes.md",
46
+ "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant",
47
+ "chunk_id": "Chatbot_Architecture_Notes.md_#3_b0899bfc",
48
  "has_header": true,
49
+ "word_count": 73
50
  }
51
  },
52
  {
53
+ "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant\n\n# # 🧭 Orchestration Flow (LangGraph)\n\n```mermaid\ngraph TD\n A[START] --> B[agent node]\n B -->|tool call| C[tools node]\n B -->|no tool| D[memory_write]\n C --> B\n D --> E[END]\n```\n\n## # **Nodes**:\n\n- **agent**: Calls main LLM with conversation window + system prompt\n- **tools**: Executes retriever or memory search tools\n- **memory_write**: Persists summaries to long-term memory\n\n## # **Conditional Edges**:\n\n- From **agent** `tools` if tool call detected\n- From **agent** → `memory_write` if no tool call\n\n---\n\n# # 💬 System Prompt\n\nThe assistant:\n\n- Uses retriever and memory search tools to gather facts about Krishna\n- Avoids fabrication and requests clarification when needed\n- Responds humorously when off-topic but steers back to Krishna’s expertise\n- Formats with Markdown, headings, and bullet points\n\nEmbedded **Krishna’s Bio** provides static grounding context.\n\n---",
54
  "metadata": {
55
  "source": "Chatbot_Architecture_Notes.md",
56
+ "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant",
57
+ "chunk_id": "Chatbot_Architecture_Notes.md_#4_c58f0c4c",
58
  "has_header": true,
59
+ "word_count": 135
60
  }
61
  },
62
  {
63
+ "text": "[HEADER] # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant\n\n# # 🌐 API & Streaming\n\n- **Backend**: FastAPI (`backend/api.py`)\n - `/chat` SSE endpoint streams tokens in real-time\n - Passes `thread_id` & `is_final` to LangGraph for stateful conversations\n- **Frontend**: React + Tailwind (custom chat UI)\n - Threaded conversation storage in browser `localStorage`\n - Real-time token rendering via `EventSource`\n - Features: new chat, clear chat, delete thread, suggestions\n\n---\n\n# # 🧩 Design Improvements\n\n- **LangGraph StateGraph** ensures explicit control of message flow\n- **Thread-scoped memory** enables multi-session personalization\n- **Hybrid RRF + Cross-Encoder + MMR** retrieval pipeline improves relevance & diversity\n- **SSE streaming** for low-latency feedback\n- **Decoupled retrieval** and **memory** as separate tools for modularity",
64
  "metadata": {
65
  "source": "Chatbot_Architecture_Notes.md",
66
+ "header": "# 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant",
67
+ "chunk_id": "Chatbot_Architecture_Notes.md_#5_50777b59",
68
  "has_header": true,
69
+ "word_count": 108
70
  }
71
  },
72
  {
73
+ "text": "[HEADER] # Education\n\n# Education\n\n# # Virginia Tech | Master of Science, Computer Science\n\n**Dec 2024**\n\n- **GPA**: 3.9/4.0\n\n# # Vel Tech University | Bachelor of Technology, Computer Science and Engineering\n\n- **GPA**: 8.24/10",
74
  "metadata": {
75
+ "source": "education.md",
76
+ "header": "# Education",
77
+ "chunk_id": "education.md_#0_ac6a482e",
78
  "has_header": true,
79
+ "word_count": 33
80
  }
81
  },
82
  {
83
+ "text": "[HEADER] # Professional Experience\n\n# # Software Engineer - AI Platform | Cloud Systems LLC | US Remote\n\n**Jul 2025 - Present**\n\n- **Agentic Workflow Automation**: Architected an agentic workflow using **LangGraph** and **ReAct** to automate SQL generation. This system automated **~65%** of ad-hoc data-auditing requests from internal stakeholders, reducing the average response time from **4 hours to under 2 minutes**.\n- **ETL Optimization**: Optimized data ingestion performance by rebuilding ETL pipelines with batched I/O, incremental refresh logic, and dependency pruning, cutting daily execution runtime by **25%**.\n- **Infrastructure & Reliability**: Improved production reliability by shipping the agent service on **Kubernetes** with autoscaling and rolling deploys, adding alerts and rollback steps for failed releases.\n- **Contract Testing**: Improved cross-service reliability by implementing **Pydantic** schema validation and contract tests, preventing multiple breaking changes from reaching production.",
84
  "metadata": {
85
+ "source": "experience.md",
86
+ "header": "# Professional Experience",
87
+ "chunk_id": "experience.md_#1_33df1d29",
88
  "has_header": true,
89
+ "word_count": 131
90
  }
91
  },
92
  {
93
+ "text": "[HEADER] # Professional Experience\n\n# # Machine Learning Engineer | Virginia Tech, Dept. of Plant Sciences | Blacksburg, VA\n\n**Aug 2024 - Jul 2025**\n\n- **Model Optimization**: Increased genomics sequence classification throughput by **32%** by applying **LoRA** and **soft prompting** methods. Packaged repeatable **PyTorch** pipelines that cut per-experiment training time by **4.5 hours**.\n- **HPC Orchestration**: Developed an ML orchestration layer for distributed GPU training on HPC clusters. Engineered checkpoint-resume logic that handled preemptive node shutdowns, optimizing resource utilization and reducing compute waste by **15%**.\n- **MLOps**: Reduced research environment setup time from hours to minutes by containerizing fine-tuned models with **Docker** and managing the experimental lifecycle (versions, hyperparameters, and weights) via **MLflow**.",
94
  "metadata": {
95
+ "source": "experience.md",
96
+ "header": "# Professional Experience",
97
+ "chunk_id": "experience.md_#2_a1069896",
98
  "has_header": true,
99
+ "word_count": 109
100
  }
101
  },
102
  {
103
+ "text": "[HEADER] # Professional Experience\n\n# # Software Engineer | UJR Technologies Pvt Ltd | Hyderabad, India\n\n**Jul 2021 - Dec 2022**\n\n- **API & SDK Development**: Designed and maintained standardized **REST APIs** and **Python-based SDKs** to streamline the ML development lifecycle, reducing cross-team integration defects by **40%**.\n- **Model Serving**: Engineered model-serving endpoints with automated input validation and deployment health checks, lowering prediction-related failures by **30%** for ML-driven features.\n- **CI/CD Pipeline**: Automated CI/CD pipelines via **GitHub Actions** with comprehensive test coverage and scripted rollback procedures, decreasing release failures by **20%** across production environments.",
104
  "metadata": {
105
+ "source": "experience.md",
106
+ "header": "# Professional Experience",
107
+ "chunk_id": "experience.md_#3_f6eb60f5",
108
  "has_header": true,
109
+ "word_count": 90
110
  }
111
  },
112
  {
 
229
  "word_count": 121
230
  }
231
  },
232
+ {
233
+ "text": "[HEADER] # Projects\n\n# Projects\n\n# # Autonomous Multi-Agent Web UI Automation System\n\n- **Overview**: Developed a multi-agent system using **LangGraph** and **Playwright** to navigate non-deterministic UI changes across 5 high-complexity SaaS platforms.\n- **Impact**: Increased task completion success rate from **68% to 94%** by implementing a two-stage verification loop with step-level assertions and exponential backoff for dynamic DOM states.\n- **Observability**: Integrated **LangSmith** traces for observability, reducing the mean-time-to-debug for broken selectors by **14 minutes per incident**.",
234
+ "metadata": {
235
+ "source": "projects.md",
236
+ "header": "# Projects",
237
+ "chunk_id": "projects.md_#0_89043f3a",
238
+ "has_header": true,
239
+ "word_count": 75
240
+ }
241
+ },
242
+ {
243
+ "text": "[HEADER] # Projects\n\n# # Proxy TuNER: Advancing Cross-Domain Named Entity Recognition through Proxy Tuning\n\n- **Overview**: Improved cross-domain NER F1-score by **8%** by implementing a proxy-tuning approach for **LLaMA 2 models** (7B, 7B-Chat, 13B) using logit ensembling and gradient reversal.\n- **Optimization**: Optimized inference performance by **30%** and reduced training costs by **70%** through distributed execution and model path optimizations in **PyTorch**.\n\n# # IntelliMeet: AI-Enabled Decentralized Video Conferencing App\n\n- **Overview**: Architected a secure, decentralized video platform using **WebRTC** and **federated learning** to maintain data privacy while sharing only aggregated model updates.\n- **Reliability**: Reduced call dropouts by **25%** by engineering network recovery logic and on-device **RetinaFace** attention detection for client-side quality adaptation.",
244
+ "metadata": {
245
+ "source": "projects.md",
246
+ "header": "# Projects",
247
+ "chunk_id": "projects.md_#1_48eb2f1e",
248
+ "has_header": true,
249
+ "word_count": 112
250
+ }
251
+ },
252
+ {
253
+ "text": "[HEADER] # Publications\n\n# Publications\n\n- **Predicting Circadian Transcription in mRNAs and lncRNAs**, IEEE BIBM 2024\n- **DNA Foundation Models for Cross-Species TF Binding Prediction**, NeurIPS ML in CompBio 2025\n- **Multi-omics atlas of the plant nuclear envelope**, Science Advances (under review) 2025, University of California, Berkeley",
254
+ "metadata": {
255
+ "source": "publications.md",
256
+ "header": "# Publications",
257
+ "chunk_id": "publications.md_#0_3ab998c3",
258
+ "has_header": true,
259
+ "word_count": 44
260
+ }
261
+ },
262
+ {
263
+ "text": "[HEADER] # Technical Skills\n\n# Technical Skills\n\n# # Languages\n\n- Python, SQL, TypeScript, JavaScript, MongoDB\n\n# # ML & AI Frameworks\n\n- PyTorch, Transformers, LangChain, LangGraph, LoRA, RAG, NLP, SKLearn, XGBoost\n\n# # Data & Infrastructure\n\n- Docker, Kubernetes, Apache Airflow, MLflow, Redis, FAISS, AWS, GCP, Git\n\n# # Tools & Observability\n\n- LangSmith, Grafana, CI/CD (GitHub Actions/Jenkins), Weights & Biases, Linux",
264
+ "metadata": {
265
+ "source": "skills.md",
266
+ "header": "# Technical Skills",
267
+ "chunk_id": "skills.md_#0_8e74dc40",
268
+ "has_header": true,
269
+ "word_count": 59
270
+ }
271
+ },
272
  {
273
  "text": "[HEADER] ## 🧗‍♂️ Hobbies & Passions\n\n## 🧗‍♂️ Hobbies & Passions\n\nHere’s what keeps me energized and curious outside of work:\n\n- **🥾 Hiking & Outdoor Adventures** — Nothing clears my mind like a good hike.\n- **🎬 Marvel Fan for Life** — I’ve seen every Marvel movie, and I’d probably give my life for the MCU (Team Iron Man, always).\n- **🏏 Cricket Enthusiast** — Whether it's IPL or gully cricket, I'm all in.\n- **🚀 Space Exploration Buff** — Obsessed with rockets, Mars missions, and the future of interplanetary travel.\n- **🍳 Cooking Explorer** — I enjoy experimenting with recipes, especially fusion dishes.\n- **🕹️ Gaming & Reverse Engineering** — I love diving into game logic and breaking things down just to rebuild them better.\n- **🧑‍🤝‍🧑 Time with Friends** — Deep conversations, spontaneous trips, or chill evenings—friends keep me grounded.\n\n---",
274
  "metadata": {
backend/data/faiss_store/v30_1000-250/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:bd1d8e7e716c51242a7b4af9c2f8a7900de4a0a5b7d90374286abd2c75bd3e7c
3
+ size 184365
backend/data/faiss_store/v30_1000-250/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:685b70a0fbd47f8ccb3e9f940de304370dd699ef9d72e0dd17256a007d3b6528
3
+ size 29588
backend/data/faiss_store/v42_1000-250/index.faiss ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:401590397d9754522466693adaca908407d119593e187dfb212e938bb5313eb4
3
+ size 64557
backend/data/faiss_store/v42_1000-250/index.pkl ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:1adfd6e3ac55f44a3d8acde031bf6c5a734ff0e6ab665c61e9cddbbbe12c5e58
3
+ size 40986
personal_data/Chatbot_Architecture_Notes.md CHANGED
@@ -1,137 +1,6 @@
1
- # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (old and intial one)
2
 
3
- This document outlines the technical architecture and modular design of Krishna Vamsi Dhulipalla’s personal AI chatbot system, implemented using **LangChain**, **OpenAI**, **NVIDIA NIMs**, and **Gradio**. The assistant is built for intelligent, retriever-augmented, memory-aware interaction tailored to Krishna’s background and user context.
4
-
5
- ---
6
-
7
- ## 🧱 Core Components
8
-
9
- ### 1. **LLMs Used and Their Roles**
10
-
11
- | Purpose | Model Name | Role Description |
12
- | ----------------------------------- | ---------------------------------------- | ---------------------------------------------------------------- |
13
- | **Rephraser LLM** | `microsoft/phi-3-mini-4k-instruct` | Rewrites vague/short queries into detailed, keyword-rich queries |
14
- | **Relevance Classifier + Reranker** | `mistralai/mixtral-8x22b-instruct-v0.1` | Classifies query relevance to KB and reranks retrieved chunks |
15
- | **Answer Generator** | `nvidia/llama-3.1-nemotron-70b-instruct` | Provides rich, structured answers (replacing GPT-4o for testing) |
16
- | **Fallback Humor Model** | `mistralai/mixtral-8x22b-instruct-v0.1` | Responds humorously and redirects when out-of-scope |
17
- | **KnowledgeBase Updater** | `mistralai/mistral-7b-instruct-v0.3` | Extracts and updates structured memory about the user |
18
-
19
- All models are integrated via **LangChain RunnableChains**, supporting both streaming and structured execution.
20
-
21
- ---
22
-
23
- ## 🔍 Retrieval Architecture
24
-
25
- ### ✅ **Hybrid Retrieval System**
26
-
27
- The assistant combines:
28
-
29
- - **BM25Retriever**: Lexical keyword match
30
- - **FAISS Vector Search**: Dense embeddings from `sentence-transformers/all-MiniLM-L6-v2`
31
-
32
- ### 🧠 Rephrasing for Retrieval
33
-
34
- - The **user's query** is expanded using the Rephraser LLM, with awareness of `last_followups` and memory
35
- - **Rewritten query** is used throughout retrieval, validation, and reranking
36
-
37
- ### 📊 Scoring & Ranking
38
-
39
- - Each subquery is run through both BM25 and FAISS
40
- - Results are merged via weighted formula:
41
- `final_score = α * vector_score + (1 - α) * bm25_score`
42
- - Deduplication via fingerprinting
43
- - Top-k (default: 15) results are passed forward
44
-
45
- ---
46
-
47
- ## 🔎 Validation + Chunk Reranking
48
-
49
- ### 🔍 Relevance Classification
50
-
51
- - LLM2 evaluates:
52
- - Whether the query (or rewritten query) is **in-scope**
53
- - If so, returns a **reranked list of chunk indices**
54
- - Memory (`last_input`, `last_output`, `last_followups`) and `rewritten_query` are included for better context
55
-
56
- ### ❌ If Out-of-Scope
57
-
58
- - Chunks are discarded
59
- - Response is generated using fallback LLM with humor and redirection
60
-
61
- ---
62
-
63
- ## 🧠 Memory + Personalization
64
-
65
- ### 📘 KnowledgeBase Model
66
-
67
- Tracks structured user data:
68
-
69
- - `user_name`, `company`, `last_input`, `last_output`
70
- - `summary_history`, `recent_interests`, `last_followups`, `tone`
71
-
72
- ### 🔄 Memory Updates
73
-
74
- - After every response, assistant extracts and updates memory
75
- - Handled via `RExtract` pipeline using `PydanticOutputParser` and KB LLM
76
-
77
- ---
78
-
79
- ## 🧭 Orchestration Flow
80
-
81
- ```text
82
- User Input
83
-
84
- Rephraser LLM (phi-3-mini)
85
-
86
- Hybrid Retrieval (BM25 + FAISS)
87
-
88
- Validation + Reranking (mixtral-8x22b)
89
-
90
- ┌──────────────┐ ┌────────────────────┐
91
- │ In-Scope │ │ Out-of-Scope Query │
92
- │ (Top-k Chunks)│ │ (Memory-based only)│
93
- └────┬─────────┘ └─────────────┬──────┘
94
- ↓ ↓
95
- Answer LLM (nemotron-70b) Fallback Humor LLM
96
- ```
97
-
98
- ---
99
-
100
- ## 💬 Frontend Interface (Gradio)
101
-
102
- - Built using **Gradio ChatInterface + Blocks**
103
- - Features:
104
- - Responsive design
105
- - Custom CSS
106
- - Streaming markdown responses
107
- - Preloaded examples and auto-scroll
108
-
109
- ---
110
-
111
- ## 🧩 Additional Design Highlights
112
-
113
- - **Streaming**: Nemotron-70B used via LangChain streaming
114
- - **Prompt Engineering**: Answer prompts use markdown formatting, section headers, bullet points, and personalized sign-offs
115
- - **Memory-Aware Rewriting**: Handles vague replies like `"yes"` or `"A"` by mapping them to `last_followups`
116
- - **Knowledge Chunk Enrichment**: Each FAISS chunk includes synthetic summary and 3 QA-style synthetic queries
117
-
118
- ---
119
-
120
- ## 🚀 Future Enhancements
121
-
122
- - Tool calling for tasks like calendar access or Google search
123
- - Multi-model reranking agents
124
- - Memory summarization agents for long dialogs
125
- - Topic planners to group conversations
126
- - Retrieval filtering based on user interest and session
127
-
128
- ---
129
-
130
- This architecture is modular, extensible, and designed to simulate a memory-grounded, expert-aware personal assistant tailored to Krishna’s evolving knowledge and conversational goals.
131
-
132
- # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant (LangGraph Version) (New and current one)
133
-
134
- This document details the updated architecture of **Krishna Vamsi Dhulipalla’s** personal AI assistant, now fully implemented with **LangGraph** for orchestrated state management and tool execution. The system is designed for **retrieval-augmented, memory-grounded, and multi-turn conversational intelligence**, integrating **OpenAI GPT-4o**, **Hugging Face embeddings**, and **cross-encoder reranking**.
135
 
136
  ---
137
 
@@ -240,33 +109,10 @@ Embedded **Krishna’s Bio** provides static grounding context.
240
 
241
  ---
242
 
243
- ## 🖥️ Frontend Highlights
244
-
245
- - Dark theme ChatGPT-style UI
246
- - Sidebar for thread management
247
- - Live streaming responses with Markdown rendering
248
- - Suggestion prompts for quick interactions
249
- - Message actions: copy, edit, regenerate
250
-
251
- ---
252
-
253
- ## 🧩 Design Improvements Over Previous Version
254
 
255
  - **LangGraph StateGraph** ensures explicit control of message flow
256
  - **Thread-scoped memory** enables multi-session personalization
257
  - **Hybrid RRF + Cross-Encoder + MMR** retrieval pipeline improves relevance & diversity
258
  - **SSE streaming** for low-latency feedback
259
- - Decoupled **retrieval** and **memory** as separate tools for modularity
260
-
261
- ---
262
-
263
- ## 🚀 Future Enhancements
264
-
265
- - Integrate **tool calling** for external APIs (calendar, search)
266
- - Summarization agents for condensing memory store
267
- - Interest-based retrieval filtering
268
- - Multi-agent orchestration for complex tasks
269
-
270
- ---
271
-
272
- This LangGraph-powered architecture delivers a **stateful, retrieval-augmented, memory-aware personal assistant** optimized for Krishna’s profile and designed for **extensibility, performance, and precision**.
 
1
+ # 🤖 Chatbot Architecture Overview: Krishna's Personal AI Assistant
2
 
3
+ This document details the architecture of **Krishna Vamsi Dhulipalla’s** personal AI assistant, implemented with **LangGraph** for orchestrated state management and tool execution. The system is designed for **retrieval-augmented, memory-grounded, and multi-turn conversational intelligence**, integrating **OpenAI GPT-4o**, **Hugging Face embeddings**, and **cross-encoder reranking**.
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
4
 
5
  ---
6
 
 
109
 
110
  ---
111
 
112
+ ## 🧩 Design Improvements
 
 
 
 
 
 
 
 
 
 
113
 
114
  - **LangGraph StateGraph** ensures explicit control of message flow
115
  - **Thread-scoped memory** enables multi-session personalization
116
  - **Hybrid RRF + Cross-Encoder + MMR** retrieval pipeline improves relevance & diversity
117
  - **SSE streaming** for low-latency feedback
118
+ - **Decoupled retrieval** and **memory** as separate tools for modularity
 
 
 
 
 
 
 
 
 
 
 
 
 
personal_data/aprofile.md DELETED
@@ -1,155 +0,0 @@
1
- # 👋 Hello, I'm Krishna Vamsi Dhulipalla
2
-
3
- I’m a **Machine Learning Engineer** with over **3 years of experience** designing and deploying intelligent AI systems, integrating backend infrastructure, and building real-time data workflows. I specialize in **LLM-powered agents**, **semantic search**, **bioinformatics AI models**, and **cloud-native ML infrastructure**.
4
-
5
- I earned my **M.S. in Computer Science** from **Virginia Tech** in December 2024 with a 3.95/4.0 GPA, focusing on large language models, intelligent agents, and scalable data systems. My work spans the full ML lifecycle—from research and fine-tuning transformer architectures to deploying production-ready applications on AWS and GCP.
6
-
7
- I’m passionate about **LLM-driven systems**, **multi-agent orchestration**, and **domain-adaptive ML**, particularly in **genomic data analysis** and **real-time analytics**.
8
-
9
- ---
10
-
11
- ## 🎯 Career Summary
12
-
13
- - 👨‍💻 3+ years of experience in **ML systems design**, **LLM-powered applications**, and **data engineering**
14
- - 🧬 Proven expertise in **transformer fine-tuning** (LoRA, soft prompting) for genomic classification
15
- - 🤖 Skilled in **LangChain**, **LangGraph**, **AutoGen**, and **CrewAI** for intelligent agent workflows
16
- - ☁️ Deep knowledge of **AWS** (S3, Glue, Lambda, SageMaker, ECS, CloudWatch) and **GCP** (BigQuery, Dataflow, Composer)
17
- - ⚡ Experienced in **real-time data pipelines** using **Apache Kafka**, **Spark**, **Airflow**, and **dbt**
18
- - 📊 Strong foundation in **synthetic data generation**, **domain adaptation**, and **cross-domain NER**
19
-
20
- ## 🔭 Areas of Current Focus
21
-
22
- - Developing **LLM-powered mobile automation agents** for UI task execution
23
- - Architecting **retrieval-augmented generation (RAG)** systems with hybrid retrieval and cross-encoder reranking
24
- - Fine-tuning **DNA foundation models** like DNABERT & HyenaDNA for plant genomics
25
- - Building **real-time analytics pipelines** integrating Kafka, Spark, Airflow, and cloud services
26
-
27
- ---
28
-
29
- ## 🎓 Education
30
-
31
- ### Virginia Tech — M.S. in Computer Science
32
-
33
- 📍 Blacksburg, VA | Jan 2023 – Dec 2024
34
- **GPA:** 3.95 / 4.0
35
- Relevant Coursework: Distributed Systems, Machine Learning Optimization, Genomics, LLMs & Transformer Architectures
36
-
37
- ### Anna University — B.Tech in Computer Science and Engineering
38
-
39
- 📍 Chennai, India | Jun 2018 – May 2022
40
- **GPA:** 8.24 / 10
41
- Specialization: Real-Time Analytics, Cloud Systems, Software Engineering Principles
42
-
43
- ---
44
-
45
- ## 🛠️ Technical Skills
46
-
47
- **Programming:** Python, R, SQL, JavaScript, TypeScript, Node.js, FastAPI, MongoDB
48
- **ML Frameworks:** PyTorch, TensorFlow, scikit-learn, Hugging Face Transformers
49
- **LLM & Agents:** LangChain, LangGraph, AutoGen, CrewAI, Prompt Engineering, RAG, LoRA, GANs
50
- **ML Techniques:** Self-Supervised Learning, Cross-Domain Adaptation, Hyperparameter Optimization, A/B Testing
51
- **Data Engineering:** Apache Spark, Kafka, dbt, Airflow, ETL Pipelines, Delta Lake, Snowflake
52
- **Cloud & Infra:** AWS (S3, Glue, Lambda, Redshift, ECS, SageMaker, CloudWatch), GCP (GCS, BigQuery, Dataflow, Composer)
53
- **DevOps/MLOps:** Docker, Kubernetes, MLflow, CI/CD, Weights & Biases
54
- **Visualization:** Tableau, Shiny (R), Plotly, Matplotlib
55
- **Other Tools:** Pandas, NumPy, Git, LangSmith, LangFlow, Linux
56
-
57
- ---
58
-
59
- ## 💼 Professional Experience
60
-
61
- ### Cloud Systems LLC — ML Research Engineer (Current role)
62
-
63
- 📍 Remote | Jul 2024 – Present
64
-
65
- - Designed and optimized **SQL-based data retrieval** and **batch + real-time pipelines**
66
- - Built automated **ETL workflows** integrating multiple data sources
67
-
68
- ### Virginia Tech — ML Research Engineer
69
-
70
- 📍 Blacksburg, VA | Sep 2024 – Jul 2024
71
-
72
- - Developed **DNA sequence classification pipelines** using DNABERT & HyenaDNA with LoRA & soft prompting (94%+ accuracy)
73
- - Automated preprocessing of **1M+ genomic sequences** with Biopython & Airflow, reducing runtime by 40%
74
- - Built **LangChain-based semantic search** for genomics literature
75
- - Deployed fine-tuned LLMs using Docker, MLflow, and optionally SageMaker
76
-
77
- ### Virginia Tech — Research Assistant
78
-
79
- 📍 Blacksburg, VA | Jun 2023 – May 2024
80
-
81
- - Built **genomic ETL pipelines** (Airflow + AWS Glue) improving research data availability by 50%
82
- - Automated retraining workflows via CI/CD, reducing manual workload by 40%
83
- - Benchmarked compute cluster performance to cut runtime costs by 15%
84
-
85
- ### UJR Technologies Pvt Ltd — Data Engineer
86
-
87
- 📍 Hyderabad, India | Jul 2021 – Dec 2022
88
-
89
- - Migrated **batch ETL to real-time streaming** with Kafka & Spark (↓ latency 30%)
90
- - Deployed Dockerized microservices to AWS ECS, improving deployment speed by 25%
91
- - Optimized Snowflake schemas to improve query performance by 40%
92
-
93
- ---
94
-
95
- ## 📊 Highlight Projects
96
-
97
- - **LLM-Based Android Agent** – Multi-step UI automation with memory, self-reflection, and context recovery (80%+ accuracy)
98
-
99
- ### Real-Time IoT-Based Temperature Forecasting
100
-
101
- - Kafka-based pipeline for 10K+ sensor readings with LLaMA 2-based time series model (91% accuracy)
102
- - Airflow + Looker dashboards (↓ manual reporting by 30%)
103
- - S3 lifecycle policies saved 40% storage cost with versioned backups
104
- 🔗 [GitHub](https://github.com/krishna-creator/Real-Time-IoT-Based-Temperature-Analytics-and-Forecasting)
105
-
106
- ### Proxy TuNER: Cross-Domain NER
107
-
108
- - Developed a proxy tuning method for domain-agnostic BERT
109
- - 15% generalization gain using gradient reversal + feature alignment
110
- - 70% cost reduction via logit-level ensembling
111
- 🔗 [GitHub](https://github.com/krishna-creator/ProxytuNER)
112
-
113
- ### IntelliMeet: AI-Powered Conferencing
114
-
115
- - Federated learning, end-to-end encrypted platform
116
- - Live attention detection using RetinaFace (<200ms latency)
117
- - Summarization with Transformer-based speech-to-text
118
- 🔗 [GitHub](https://github.com/krishna-creator/SE-Project---IntelliMeet)
119
-
120
- ### Automated Drone Image Analysis
121
-
122
- - Real-time crop disease detection using drone imagery
123
- - Used OpenCV, RAG, and GANs for synthetic data generation
124
- - Improved detection accuracy by 15% and reduced processing latency by 70%
125
-
126
- ---
127
-
128
- ## 📜 Certifications
129
-
130
- - 🏆 NVIDIA – Building RAG Agents with LLMs
131
- - 🏆 Google Cloud – Data Engineering Foundations
132
- - 🏆 AWS – Machine Learning Specialty
133
- - 🏆 Microsoft – MERN Stack Development
134
- - 🏆 Snowflake – End-to-End Data Engineering
135
- - 🏆 Coursera – Machine Learning Specialization
136
- 🔗 [View All Credentials](https://www.linkedin.com/in/krishnavamsidhulipalla/)
137
-
138
- ---
139
-
140
- ## 📚 Research Publications
141
-
142
- - **IEEE BIBM 2024** – “Leveraging ML for Predicting Circadian Transcription in mRNAs and lncRNAs”
143
- [DOI: 10.1109/BIBM62325.2024.10822684](https://doi.org/10.1109/BIBM62325.2024.10822684)
144
-
145
- - **MLCB** – “Harnessing DNA Foundation Models for TF Binding Prediction in Plants”
146
-
147
- ---
148
-
149
- ## 🔗 External Links / Contact details
150
-
151
- - 🌐 [Personal Portfolio/ personal website](http://krishna-dhulipalla.github.io)
152
- - 🧪 [GitHub](https://github.com/Krishna-dhulipalla)
153
- - 💼 [LinkedIn](https://www.linkedin.com/in/krishnavamsidhulipalla)
154
- - 📬 dhulipallakrishnavamsi@gmail.com
155
- - 🤖 [Personal Chatbot](https://huggingface.co/spaces/krishnadhulipalla/Personal_ChatBot)
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
personal_data/bio.md ADDED
@@ -0,0 +1 @@
 
 
1
+ Krishna Vamsi Dhulipalla is a Software Engineer specializing in generic workflows and AI platforms. He currently works at **Cloud Systems LLC**, where he architects LangGraph-based agents to automate data auditing. Previously, he served as a Machine Learning Engineer at **Virginia Tech**, optimizing genomic models with LoRA/soft prompting, and as a Software Engineer at **UJR Technologies**, building ML SDKs and CI/CD pipelines. He holds an M.S. in Computer Science from Virginia Tech (Dec 2024) and has significant expertise in **LangGraph**, **Kubernetes**, **PyTorch**, and **MLOps**.
personal_data/education.md ADDED
@@ -0,0 +1,11 @@
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Education
2
+
3
+ ## Virginia Tech | Master of Science, Computer Science
4
+
5
+ **Dec 2024**
6
+
7
+ - **GPA**: 3.9/4.0
8
+
9
+ ## Vel Tech University | Bachelor of Technology, Computer Science and Engineering
10
+
11
+ - **GPA**: 8.24/10
personal_data/experience.md ADDED
@@ -0,0 +1,26 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Professional Experience
2
+
3
+ ## Software Engineer - AI Platform | Cloud Systems LLC | US Remote
4
+
5
+ **Jul 2025 - Present**
6
+
7
+ - **Agentic Workflow Automation**: Architected an agentic workflow using **LangGraph** and **ReAct** to automate SQL generation. This system automated **~65%** of ad-hoc data-auditing requests from internal stakeholders, reducing the average response time from **4 hours to under 2 minutes**.
8
+ - **ETL Optimization**: Optimized data ingestion performance by rebuilding ETL pipelines with batched I/O, incremental refresh logic, and dependency pruning, cutting daily execution runtime by **25%**.
9
+ - **Infrastructure & Reliability**: Improved production reliability by shipping the agent service on **Kubernetes** with autoscaling and rolling deploys, adding alerts and rollback steps for failed releases.
10
+ - **Contract Testing**: Improved cross-service reliability by implementing **Pydantic** schema validation and contract tests, preventing multiple breaking changes from reaching production.
11
+
12
+ ## Machine Learning Engineer | Virginia Tech, Dept. of Plant Sciences | Blacksburg, VA
13
+
14
+ **Aug 2024 - Jul 2025**
15
+
16
+ - **Model Optimization**: Increased genomics sequence classification throughput by **32%** by applying **LoRA** and **soft prompting** methods. Packaged repeatable **PyTorch** pipelines that cut per-experiment training time by **4.5 hours**.
17
+ - **HPC Orchestration**: Developed an ML orchestration layer for distributed GPU training on HPC clusters. Engineered checkpoint-resume logic that handled preemptive node shutdowns, optimizing resource utilization and reducing compute waste by **15%**.
18
+ - **MLOps**: Reduced research environment setup time from hours to minutes by containerizing fine-tuned models with **Docker** and managing the experimental lifecycle (versions, hyperparameters, and weights) via **MLflow**.
19
+
20
+ ## Software Engineer | UJR Technologies Pvt Ltd | Hyderabad, India
21
+
22
+ **Jul 2021 - Dec 2022**
23
+
24
+ - **API & SDK Development**: Designed and maintained standardized **REST APIs** and **Python-based SDKs** to streamline the ML development lifecycle, reducing cross-team integration defects by **40%**.
25
+ - **Model Serving**: Engineered model-serving endpoints with automated input validation and deployment health checks, lowering prediction-related failures by **30%** for ML-driven features.
26
+ - **CI/CD Pipeline**: Automated CI/CD pipelines via **GitHub Actions** with comprehensive test coverage and scripted rollback procedures, decreasing release failures by **20%** across production environments.
personal_data/projects.md ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Projects
2
+
3
+ ## Autonomous Multi-Agent Web UI Automation System
4
+
5
+ - **Overview**: Developed a multi-agent system using **LangGraph** and **Playwright** to navigate non-deterministic UI changes across 5 high-complexity SaaS platforms.
6
+ - **Impact**: Increased task completion success rate from **68% to 94%** by implementing a two-stage verification loop with step-level assertions and exponential backoff for dynamic DOM states.
7
+ - **Observability**: Integrated **LangSmith** traces for observability, reducing the mean-time-to-debug for broken selectors by **14 minutes per incident**.
8
+
9
+ ## Proxy TuNER: Advancing Cross-Domain Named Entity Recognition through Proxy Tuning
10
+
11
+ - **Overview**: Improved cross-domain NER F1-score by **8%** by implementing a proxy-tuning approach for **LLaMA 2 models** (7B, 7B-Chat, 13B) using logit ensembling and gradient reversal.
12
+ - **Optimization**: Optimized inference performance by **30%** and reduced training costs by **70%** through distributed execution and model path optimizations in **PyTorch**.
13
+
14
+ ## IntelliMeet: AI-Enabled Decentralized Video Conferencing App
15
+
16
+ - **Overview**: Architected a secure, decentralized video platform using **WebRTC** and **federated learning** to maintain data privacy while sharing only aggregated model updates.
17
+ - **Reliability**: Reduced call dropouts by **25%** by engineering network recovery logic and on-device **RetinaFace** attention detection for client-side quality adaptation.
personal_data/publications.md ADDED
@@ -0,0 +1,5 @@
 
 
 
 
 
 
1
+ # Publications
2
+
3
+ - **Predicting Circadian Transcription in mRNAs and lncRNAs**, IEEE BIBM 2024
4
+ - **DNA Foundation Models for Cross-Species TF Binding Prediction**, NeurIPS ML in CompBio 2025
5
+ - **Multi-omics atlas of the plant nuclear envelope**, Science Advances (under review) 2025, University of California, Berkeley
personal_data/skills.md ADDED
@@ -0,0 +1,17 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Technical Skills
2
+
3
+ ## Languages
4
+
5
+ - Python, SQL, TypeScript, JavaScript, MongoDB
6
+
7
+ ## ML & AI Frameworks
8
+
9
+ - PyTorch, Transformers, LangChain, LangGraph, LoRA, RAG, NLP, SKLearn, XGBoost
10
+
11
+ ## Data & Infrastructure
12
+
13
+ - Docker, Kubernetes, Apache Airflow, MLflow, Redis, FAISS, AWS, GCP, Git
14
+
15
+ ## Tools & Observability
16
+
17
+ - LangSmith, Grafana, CI/CD (GitHub Actions/Jenkins), Weights & Biases, Linux
requirements.txt CHANGED
Binary files a/requirements.txt and b/requirements.txt differ
 
ui/package-lock.json CHANGED
@@ -17,6 +17,7 @@
17
  "devDependencies": {
18
  "@eslint/js": "^9.32.0",
19
  "@tailwindcss/cli": "^4.1.11",
 
20
  "@types/react": "^19.1.9",
21
  "@types/react-dom": "^19.1.7",
22
  "@vitejs/plugin-react": "^4.7.0",
@@ -1023,6 +1024,17 @@
1023
  "@jridgewell/trace-mapping": "^0.3.24"
1024
  }
1025
  },
 
 
 
 
 
 
 
 
 
 
 
1026
  "node_modules/@jridgewell/resolve-uri": {
1027
  "version": "3.1.2",
1028
  "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
@@ -1034,9 +1046,9 @@
1034
  }
1035
  },
1036
  "node_modules/@jridgewell/sourcemap-codec": {
1037
- "version": "1.5.4",
1038
- "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.4.tgz",
1039
- "integrity": "sha512-VT2+G1VQs/9oz078bLrYbecdZKs912zQlkelYpuf+SXF+QvZDYJlbx/LSx+meSAwdDFnF8FVXW92AVjjkVmgFw==",
1040
  "dev": true,
1041
  "license": "MIT"
1042
  },
@@ -1976,6 +1988,535 @@
1976
  "node": ">=8"
1977
  }
1978
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1979
  "node_modules/@types/babel__core": {
1980
  "version": "7.20.5",
1981
  "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
@@ -3503,9 +4044,9 @@
3503
  "license": "ISC"
3504
  },
3505
  "node_modules/jiti": {
3506
- "version": "2.5.1",
3507
- "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.5.1.tgz",
3508
- "integrity": "sha512-twQoecYPiVA5K/h6SxtORw/Bs3ar+mLUtoPSc7iMXzQzK8d7eJ/R09wmTwAjiamETn1cXYPGfNnu7DMoHgu12w==",
3509
  "dev": true,
3510
  "license": "MIT",
3511
  "bin": {
@@ -3632,6 +4173,27 @@
3632
  "lightningcss-win32-x64-msvc": "1.30.1"
3633
  }
3634
  },
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
3635
  "node_modules/lightningcss-darwin-arm64": {
3636
  "version": "1.30.1",
3637
  "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.1.tgz",
@@ -3896,13 +4458,13 @@
3896
  }
3897
  },
3898
  "node_modules/magic-string": {
3899
- "version": "0.30.17",
3900
- "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.17.tgz",
3901
- "integrity": "sha512-sNPKHvyjVf7gyjwS4xGTaW/mCnF8wnjtifKBEhxfZ7E/S8tQ0rssrwGNn6q8JH/ohItJfSQp9mBtQYuTlH5QnA==",
3902
  "dev": true,
3903
  "license": "MIT",
3904
  "dependencies": {
3905
- "@jridgewell/sourcemap-codec": "^1.5.0"
3906
  }
3907
  },
3908
  "node_modules/markdown-table": {
 
17
  "devDependencies": {
18
  "@eslint/js": "^9.32.0",
19
  "@tailwindcss/cli": "^4.1.11",
20
+ "@tailwindcss/vite": "^4.1.18",
21
  "@types/react": "^19.1.9",
22
  "@types/react-dom": "^19.1.7",
23
  "@vitejs/plugin-react": "^4.7.0",
 
1024
  "@jridgewell/trace-mapping": "^0.3.24"
1025
  }
1026
  },
1027
+ "node_modules/@jridgewell/remapping": {
1028
+ "version": "2.3.5",
1029
+ "resolved": "https://registry.npmjs.org/@jridgewell/remapping/-/remapping-2.3.5.tgz",
1030
+ "integrity": "sha512-LI9u/+laYG4Ds1TDKSJW2YPrIlcVYOwi2fUC6xB43lueCjgxV4lffOCZCtYFiH6TNOX+tQKXx97T4IKHbhyHEQ==",
1031
+ "dev": true,
1032
+ "license": "MIT",
1033
+ "dependencies": {
1034
+ "@jridgewell/gen-mapping": "^0.3.5",
1035
+ "@jridgewell/trace-mapping": "^0.3.24"
1036
+ }
1037
+ },
1038
  "node_modules/@jridgewell/resolve-uri": {
1039
  "version": "3.1.2",
1040
  "resolved": "https://registry.npmjs.org/@jridgewell/resolve-uri/-/resolve-uri-3.1.2.tgz",
 
1046
  }
1047
  },
1048
  "node_modules/@jridgewell/sourcemap-codec": {
1049
+ "version": "1.5.5",
1050
+ "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.5.tgz",
1051
+ "integrity": "sha512-cYQ9310grqxueWbl+WuIUIaiUaDcj7WOq5fVhEljNVgRfOUhY9fy2zTvfoqWsnebh8Sl70VScFbICvJnLKB0Og==",
1052
  "dev": true,
1053
  "license": "MIT"
1054
  },
 
1988
  "node": ">=8"
1989
  }
1990
  },
1991
+ "node_modules/@tailwindcss/vite": {
1992
+ "version": "4.1.18",
1993
+ "resolved": "https://registry.npmjs.org/@tailwindcss/vite/-/vite-4.1.18.tgz",
1994
+ "integrity": "sha512-jVA+/UpKL1vRLg6Hkao5jldawNmRo7mQYrZtNHMIVpLfLhDml5nMRUo/8MwoX2vNXvnaXNNMedrMfMugAVX1nA==",
1995
+ "dev": true,
1996
+ "license": "MIT",
1997
+ "dependencies": {
1998
+ "@tailwindcss/node": "4.1.18",
1999
+ "@tailwindcss/oxide": "4.1.18",
2000
+ "tailwindcss": "4.1.18"
2001
+ },
2002
+ "peerDependencies": {
2003
+ "vite": "^5.2.0 || ^6 || ^7"
2004
+ }
2005
+ },
2006
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/node": {
2007
+ "version": "4.1.18",
2008
+ "resolved": "https://registry.npmjs.org/@tailwindcss/node/-/node-4.1.18.tgz",
2009
+ "integrity": "sha512-DoR7U1P7iYhw16qJ49fgXUlry1t4CpXeErJHnQ44JgTSKMaZUdf17cfn5mHchfJ4KRBZRFA/Coo+MUF5+gOaCQ==",
2010
+ "dev": true,
2011
+ "license": "MIT",
2012
+ "dependencies": {
2013
+ "@jridgewell/remapping": "^2.3.4",
2014
+ "enhanced-resolve": "^5.18.3",
2015
+ "jiti": "^2.6.1",
2016
+ "lightningcss": "1.30.2",
2017
+ "magic-string": "^0.30.21",
2018
+ "source-map-js": "^1.2.1",
2019
+ "tailwindcss": "4.1.18"
2020
+ }
2021
+ },
2022
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide": {
2023
+ "version": "4.1.18",
2024
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide/-/oxide-4.1.18.tgz",
2025
+ "integrity": "sha512-EgCR5tTS5bUSKQgzeMClT6iCY3ToqE1y+ZB0AKldj809QXk1Y+3jB0upOYZrn9aGIzPtUsP7sX4QQ4XtjBB95A==",
2026
+ "dev": true,
2027
+ "license": "MIT",
2028
+ "engines": {
2029
+ "node": ">= 10"
2030
+ },
2031
+ "optionalDependencies": {
2032
+ "@tailwindcss/oxide-android-arm64": "4.1.18",
2033
+ "@tailwindcss/oxide-darwin-arm64": "4.1.18",
2034
+ "@tailwindcss/oxide-darwin-x64": "4.1.18",
2035
+ "@tailwindcss/oxide-freebsd-x64": "4.1.18",
2036
+ "@tailwindcss/oxide-linux-arm-gnueabihf": "4.1.18",
2037
+ "@tailwindcss/oxide-linux-arm64-gnu": "4.1.18",
2038
+ "@tailwindcss/oxide-linux-arm64-musl": "4.1.18",
2039
+ "@tailwindcss/oxide-linux-x64-gnu": "4.1.18",
2040
+ "@tailwindcss/oxide-linux-x64-musl": "4.1.18",
2041
+ "@tailwindcss/oxide-wasm32-wasi": "4.1.18",
2042
+ "@tailwindcss/oxide-win32-arm64-msvc": "4.1.18",
2043
+ "@tailwindcss/oxide-win32-x64-msvc": "4.1.18"
2044
+ }
2045
+ },
2046
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-android-arm64": {
2047
+ "version": "4.1.18",
2048
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-android-arm64/-/oxide-android-arm64-4.1.18.tgz",
2049
+ "integrity": "sha512-dJHz7+Ugr9U/diKJA0W6N/6/cjI+ZTAoxPf9Iz9BFRF2GzEX8IvXxFIi/dZBloVJX/MZGvRuFA9rqwdiIEZQ0Q==",
2050
+ "cpu": [
2051
+ "arm64"
2052
+ ],
2053
+ "dev": true,
2054
+ "license": "MIT",
2055
+ "optional": true,
2056
+ "os": [
2057
+ "android"
2058
+ ],
2059
+ "engines": {
2060
+ "node": ">= 10"
2061
+ }
2062
+ },
2063
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-darwin-arm64": {
2064
+ "version": "4.1.18",
2065
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-arm64/-/oxide-darwin-arm64-4.1.18.tgz",
2066
+ "integrity": "sha512-Gc2q4Qhs660bhjyBSKgq6BYvwDz4G+BuyJ5H1xfhmDR3D8HnHCmT/BSkvSL0vQLy/nkMLY20PQ2OoYMO15Jd0A==",
2067
+ "cpu": [
2068
+ "arm64"
2069
+ ],
2070
+ "dev": true,
2071
+ "license": "MIT",
2072
+ "optional": true,
2073
+ "os": [
2074
+ "darwin"
2075
+ ],
2076
+ "engines": {
2077
+ "node": ">= 10"
2078
+ }
2079
+ },
2080
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-darwin-x64": {
2081
+ "version": "4.1.18",
2082
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-darwin-x64/-/oxide-darwin-x64-4.1.18.tgz",
2083
+ "integrity": "sha512-FL5oxr2xQsFrc3X9o1fjHKBYBMD1QZNyc1Xzw/h5Qu4XnEBi3dZn96HcHm41c/euGV+GRiXFfh2hUCyKi/e+yw==",
2084
+ "cpu": [
2085
+ "x64"
2086
+ ],
2087
+ "dev": true,
2088
+ "license": "MIT",
2089
+ "optional": true,
2090
+ "os": [
2091
+ "darwin"
2092
+ ],
2093
+ "engines": {
2094
+ "node": ">= 10"
2095
+ }
2096
+ },
2097
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-freebsd-x64": {
2098
+ "version": "4.1.18",
2099
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-freebsd-x64/-/oxide-freebsd-x64-4.1.18.tgz",
2100
+ "integrity": "sha512-Fj+RHgu5bDodmV1dM9yAxlfJwkkWvLiRjbhuO2LEtwtlYlBgiAT4x/j5wQr1tC3SANAgD+0YcmWVrj8R9trVMA==",
2101
+ "cpu": [
2102
+ "x64"
2103
+ ],
2104
+ "dev": true,
2105
+ "license": "MIT",
2106
+ "optional": true,
2107
+ "os": [
2108
+ "freebsd"
2109
+ ],
2110
+ "engines": {
2111
+ "node": ">= 10"
2112
+ }
2113
+ },
2114
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-linux-arm-gnueabihf": {
2115
+ "version": "4.1.18",
2116
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm-gnueabihf/-/oxide-linux-arm-gnueabihf-4.1.18.tgz",
2117
+ "integrity": "sha512-Fp+Wzk/Ws4dZn+LV2Nqx3IilnhH51YZoRaYHQsVq3RQvEl+71VGKFpkfHrLM/Li+kt5c0DJe/bHXK1eHgDmdiA==",
2118
+ "cpu": [
2119
+ "arm"
2120
+ ],
2121
+ "dev": true,
2122
+ "license": "MIT",
2123
+ "optional": true,
2124
+ "os": [
2125
+ "linux"
2126
+ ],
2127
+ "engines": {
2128
+ "node": ">= 10"
2129
+ }
2130
+ },
2131
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-linux-arm64-gnu": {
2132
+ "version": "4.1.18",
2133
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-gnu/-/oxide-linux-arm64-gnu-4.1.18.tgz",
2134
+ "integrity": "sha512-S0n3jboLysNbh55Vrt7pk9wgpyTTPD0fdQeh7wQfMqLPM/Hrxi+dVsLsPrycQjGKEQk85Kgbx+6+QnYNiHalnw==",
2135
+ "cpu": [
2136
+ "arm64"
2137
+ ],
2138
+ "dev": true,
2139
+ "license": "MIT",
2140
+ "optional": true,
2141
+ "os": [
2142
+ "linux"
2143
+ ],
2144
+ "engines": {
2145
+ "node": ">= 10"
2146
+ }
2147
+ },
2148
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-linux-arm64-musl": {
2149
+ "version": "4.1.18",
2150
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-arm64-musl/-/oxide-linux-arm64-musl-4.1.18.tgz",
2151
+ "integrity": "sha512-1px92582HkPQlaaCkdRcio71p8bc8i/ap5807tPRDK/uw953cauQBT8c5tVGkOwrHMfc2Yh6UuxaH4vtTjGvHg==",
2152
+ "cpu": [
2153
+ "arm64"
2154
+ ],
2155
+ "dev": true,
2156
+ "license": "MIT",
2157
+ "optional": true,
2158
+ "os": [
2159
+ "linux"
2160
+ ],
2161
+ "engines": {
2162
+ "node": ">= 10"
2163
+ }
2164
+ },
2165
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-linux-x64-gnu": {
2166
+ "version": "4.1.18",
2167
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-gnu/-/oxide-linux-x64-gnu-4.1.18.tgz",
2168
+ "integrity": "sha512-v3gyT0ivkfBLoZGF9LyHmts0Isc8jHZyVcbzio6Wpzifg/+5ZJpDiRiUhDLkcr7f/r38SWNe7ucxmGW3j3Kb/g==",
2169
+ "cpu": [
2170
+ "x64"
2171
+ ],
2172
+ "dev": true,
2173
+ "license": "MIT",
2174
+ "optional": true,
2175
+ "os": [
2176
+ "linux"
2177
+ ],
2178
+ "engines": {
2179
+ "node": ">= 10"
2180
+ }
2181
+ },
2182
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-linux-x64-musl": {
2183
+ "version": "4.1.18",
2184
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-linux-x64-musl/-/oxide-linux-x64-musl-4.1.18.tgz",
2185
+ "integrity": "sha512-bhJ2y2OQNlcRwwgOAGMY0xTFStt4/wyU6pvI6LSuZpRgKQwxTec0/3Scu91O8ir7qCR3AuepQKLU/kX99FouqQ==",
2186
+ "cpu": [
2187
+ "x64"
2188
+ ],
2189
+ "dev": true,
2190
+ "license": "MIT",
2191
+ "optional": true,
2192
+ "os": [
2193
+ "linux"
2194
+ ],
2195
+ "engines": {
2196
+ "node": ">= 10"
2197
+ }
2198
+ },
2199
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-wasm32-wasi": {
2200
+ "version": "4.1.18",
2201
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-wasm32-wasi/-/oxide-wasm32-wasi-4.1.18.tgz",
2202
+ "integrity": "sha512-LffYTvPjODiP6PT16oNeUQJzNVyJl1cjIebq/rWWBF+3eDst5JGEFSc5cWxyRCJ0Mxl+KyIkqRxk1XPEs9x8TA==",
2203
+ "bundleDependencies": [
2204
+ "@napi-rs/wasm-runtime",
2205
+ "@emnapi/core",
2206
+ "@emnapi/runtime",
2207
+ "@tybys/wasm-util",
2208
+ "@emnapi/wasi-threads",
2209
+ "tslib"
2210
+ ],
2211
+ "cpu": [
2212
+ "wasm32"
2213
+ ],
2214
+ "dev": true,
2215
+ "license": "MIT",
2216
+ "optional": true,
2217
+ "dependencies": {
2218
+ "@emnapi/core": "^1.7.1",
2219
+ "@emnapi/runtime": "^1.7.1",
2220
+ "@emnapi/wasi-threads": "^1.1.0",
2221
+ "@napi-rs/wasm-runtime": "^1.1.0",
2222
+ "@tybys/wasm-util": "^0.10.1",
2223
+ "tslib": "^2.4.0"
2224
+ },
2225
+ "engines": {
2226
+ "node": ">=14.0.0"
2227
+ }
2228
+ },
2229
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-win32-arm64-msvc": {
2230
+ "version": "4.1.18",
2231
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-arm64-msvc/-/oxide-win32-arm64-msvc-4.1.18.tgz",
2232
+ "integrity": "sha512-HjSA7mr9HmC8fu6bdsZvZ+dhjyGCLdotjVOgLA2vEqxEBZaQo9YTX4kwgEvPCpRh8o4uWc4J/wEoFzhEmjvPbA==",
2233
+ "cpu": [
2234
+ "arm64"
2235
+ ],
2236
+ "dev": true,
2237
+ "license": "MIT",
2238
+ "optional": true,
2239
+ "os": [
2240
+ "win32"
2241
+ ],
2242
+ "engines": {
2243
+ "node": ">= 10"
2244
+ }
2245
+ },
2246
+ "node_modules/@tailwindcss/vite/node_modules/@tailwindcss/oxide-win32-x64-msvc": {
2247
+ "version": "4.1.18",
2248
+ "resolved": "https://registry.npmjs.org/@tailwindcss/oxide-win32-x64-msvc/-/oxide-win32-x64-msvc-4.1.18.tgz",
2249
+ "integrity": "sha512-bJWbyYpUlqamC8dpR7pfjA0I7vdF6t5VpUGMWRkXVE3AXgIZjYUYAK7II1GNaxR8J1SSrSrppRar8G++JekE3Q==",
2250
+ "cpu": [
2251
+ "x64"
2252
+ ],
2253
+ "dev": true,
2254
+ "license": "MIT",
2255
+ "optional": true,
2256
+ "os": [
2257
+ "win32"
2258
+ ],
2259
+ "engines": {
2260
+ "node": ">= 10"
2261
+ }
2262
+ },
2263
+ "node_modules/@tailwindcss/vite/node_modules/detect-libc": {
2264
+ "version": "2.1.2",
2265
+ "resolved": "https://registry.npmjs.org/detect-libc/-/detect-libc-2.1.2.tgz",
2266
+ "integrity": "sha512-Btj2BOOO83o3WyH59e8MgXsxEQVcarkUOpEYrubB0urwnN10yQ364rsiByU11nZlqWYZm05i/of7io4mzihBtQ==",
2267
+ "dev": true,
2268
+ "license": "Apache-2.0",
2269
+ "engines": {
2270
+ "node": ">=8"
2271
+ }
2272
+ },
2273
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss": {
2274
+ "version": "1.30.2",
2275
+ "resolved": "https://registry.npmjs.org/lightningcss/-/lightningcss-1.30.2.tgz",
2276
+ "integrity": "sha512-utfs7Pr5uJyyvDETitgsaqSyjCb2qNRAtuqUeWIAKztsOYdcACf2KtARYXg2pSvhkt+9NfoaNY7fxjl6nuMjIQ==",
2277
+ "dev": true,
2278
+ "license": "MPL-2.0",
2279
+ "dependencies": {
2280
+ "detect-libc": "^2.0.3"
2281
+ },
2282
+ "engines": {
2283
+ "node": ">= 12.0.0"
2284
+ },
2285
+ "funding": {
2286
+ "type": "opencollective",
2287
+ "url": "https://opencollective.com/parcel"
2288
+ },
2289
+ "optionalDependencies": {
2290
+ "lightningcss-android-arm64": "1.30.2",
2291
+ "lightningcss-darwin-arm64": "1.30.2",
2292
+ "lightningcss-darwin-x64": "1.30.2",
2293
+ "lightningcss-freebsd-x64": "1.30.2",
2294
+ "lightningcss-linux-arm-gnueabihf": "1.30.2",
2295
+ "lightningcss-linux-arm64-gnu": "1.30.2",
2296
+ "lightningcss-linux-arm64-musl": "1.30.2",
2297
+ "lightningcss-linux-x64-gnu": "1.30.2",
2298
+ "lightningcss-linux-x64-musl": "1.30.2",
2299
+ "lightningcss-win32-arm64-msvc": "1.30.2",
2300
+ "lightningcss-win32-x64-msvc": "1.30.2"
2301
+ }
2302
+ },
2303
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-darwin-arm64": {
2304
+ "version": "1.30.2",
2305
+ "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.2.tgz",
2306
+ "integrity": "sha512-ylTcDJBN3Hp21TdhRT5zBOIi73P6/W0qwvlFEk22fkdXchtNTOU4Qc37SkzV+EKYxLouZ6M4LG9NfZ1qkhhBWA==",
2307
+ "cpu": [
2308
+ "arm64"
2309
+ ],
2310
+ "dev": true,
2311
+ "license": "MPL-2.0",
2312
+ "optional": true,
2313
+ "os": [
2314
+ "darwin"
2315
+ ],
2316
+ "engines": {
2317
+ "node": ">= 12.0.0"
2318
+ },
2319
+ "funding": {
2320
+ "type": "opencollective",
2321
+ "url": "https://opencollective.com/parcel"
2322
+ }
2323
+ },
2324
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-darwin-x64": {
2325
+ "version": "1.30.2",
2326
+ "resolved": "https://registry.npmjs.org/lightningcss-darwin-x64/-/lightningcss-darwin-x64-1.30.2.tgz",
2327
+ "integrity": "sha512-oBZgKchomuDYxr7ilwLcyms6BCyLn0z8J0+ZZmfpjwg9fRVZIR5/GMXd7r9RH94iDhld3UmSjBM6nXWM2TfZTQ==",
2328
+ "cpu": [
2329
+ "x64"
2330
+ ],
2331
+ "dev": true,
2332
+ "license": "MPL-2.0",
2333
+ "optional": true,
2334
+ "os": [
2335
+ "darwin"
2336
+ ],
2337
+ "engines": {
2338
+ "node": ">= 12.0.0"
2339
+ },
2340
+ "funding": {
2341
+ "type": "opencollective",
2342
+ "url": "https://opencollective.com/parcel"
2343
+ }
2344
+ },
2345
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-freebsd-x64": {
2346
+ "version": "1.30.2",
2347
+ "resolved": "https://registry.npmjs.org/lightningcss-freebsd-x64/-/lightningcss-freebsd-x64-1.30.2.tgz",
2348
+ "integrity": "sha512-c2bH6xTrf4BDpK8MoGG4Bd6zAMZDAXS569UxCAGcA7IKbHNMlhGQ89eRmvpIUGfKWNVdbhSbkQaWhEoMGmGslA==",
2349
+ "cpu": [
2350
+ "x64"
2351
+ ],
2352
+ "dev": true,
2353
+ "license": "MPL-2.0",
2354
+ "optional": true,
2355
+ "os": [
2356
+ "freebsd"
2357
+ ],
2358
+ "engines": {
2359
+ "node": ">= 12.0.0"
2360
+ },
2361
+ "funding": {
2362
+ "type": "opencollective",
2363
+ "url": "https://opencollective.com/parcel"
2364
+ }
2365
+ },
2366
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-linux-arm-gnueabihf": {
2367
+ "version": "1.30.2",
2368
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm-gnueabihf/-/lightningcss-linux-arm-gnueabihf-1.30.2.tgz",
2369
+ "integrity": "sha512-eVdpxh4wYcm0PofJIZVuYuLiqBIakQ9uFZmipf6LF/HRj5Bgm0eb3qL/mr1smyXIS1twwOxNWndd8z0E374hiA==",
2370
+ "cpu": [
2371
+ "arm"
2372
+ ],
2373
+ "dev": true,
2374
+ "license": "MPL-2.0",
2375
+ "optional": true,
2376
+ "os": [
2377
+ "linux"
2378
+ ],
2379
+ "engines": {
2380
+ "node": ">= 12.0.0"
2381
+ },
2382
+ "funding": {
2383
+ "type": "opencollective",
2384
+ "url": "https://opencollective.com/parcel"
2385
+ }
2386
+ },
2387
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-linux-arm64-gnu": {
2388
+ "version": "1.30.2",
2389
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-gnu/-/lightningcss-linux-arm64-gnu-1.30.2.tgz",
2390
+ "integrity": "sha512-UK65WJAbwIJbiBFXpxrbTNArtfuznvxAJw4Q2ZGlU8kPeDIWEX1dg3rn2veBVUylA2Ezg89ktszWbaQnxD/e3A==",
2391
+ "cpu": [
2392
+ "arm64"
2393
+ ],
2394
+ "dev": true,
2395
+ "license": "MPL-2.0",
2396
+ "optional": true,
2397
+ "os": [
2398
+ "linux"
2399
+ ],
2400
+ "engines": {
2401
+ "node": ">= 12.0.0"
2402
+ },
2403
+ "funding": {
2404
+ "type": "opencollective",
2405
+ "url": "https://opencollective.com/parcel"
2406
+ }
2407
+ },
2408
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-linux-arm64-musl": {
2409
+ "version": "1.30.2",
2410
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-arm64-musl/-/lightningcss-linux-arm64-musl-1.30.2.tgz",
2411
+ "integrity": "sha512-5Vh9dGeblpTxWHpOx8iauV02popZDsCYMPIgiuw97OJ5uaDsL86cnqSFs5LZkG3ghHoX5isLgWzMs+eD1YzrnA==",
2412
+ "cpu": [
2413
+ "arm64"
2414
+ ],
2415
+ "dev": true,
2416
+ "license": "MPL-2.0",
2417
+ "optional": true,
2418
+ "os": [
2419
+ "linux"
2420
+ ],
2421
+ "engines": {
2422
+ "node": ">= 12.0.0"
2423
+ },
2424
+ "funding": {
2425
+ "type": "opencollective",
2426
+ "url": "https://opencollective.com/parcel"
2427
+ }
2428
+ },
2429
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-linux-x64-gnu": {
2430
+ "version": "1.30.2",
2431
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-gnu/-/lightningcss-linux-x64-gnu-1.30.2.tgz",
2432
+ "integrity": "sha512-Cfd46gdmj1vQ+lR6VRTTadNHu6ALuw2pKR9lYq4FnhvgBc4zWY1EtZcAc6EffShbb1MFrIPfLDXD6Xprbnni4w==",
2433
+ "cpu": [
2434
+ "x64"
2435
+ ],
2436
+ "dev": true,
2437
+ "license": "MPL-2.0",
2438
+ "optional": true,
2439
+ "os": [
2440
+ "linux"
2441
+ ],
2442
+ "engines": {
2443
+ "node": ">= 12.0.0"
2444
+ },
2445
+ "funding": {
2446
+ "type": "opencollective",
2447
+ "url": "https://opencollective.com/parcel"
2448
+ }
2449
+ },
2450
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-linux-x64-musl": {
2451
+ "version": "1.30.2",
2452
+ "resolved": "https://registry.npmjs.org/lightningcss-linux-x64-musl/-/lightningcss-linux-x64-musl-1.30.2.tgz",
2453
+ "integrity": "sha512-XJaLUUFXb6/QG2lGIW6aIk6jKdtjtcffUT0NKvIqhSBY3hh9Ch+1LCeH80dR9q9LBjG3ewbDjnumefsLsP6aiA==",
2454
+ "cpu": [
2455
+ "x64"
2456
+ ],
2457
+ "dev": true,
2458
+ "license": "MPL-2.0",
2459
+ "optional": true,
2460
+ "os": [
2461
+ "linux"
2462
+ ],
2463
+ "engines": {
2464
+ "node": ">= 12.0.0"
2465
+ },
2466
+ "funding": {
2467
+ "type": "opencollective",
2468
+ "url": "https://opencollective.com/parcel"
2469
+ }
2470
+ },
2471
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-win32-arm64-msvc": {
2472
+ "version": "1.30.2",
2473
+ "resolved": "https://registry.npmjs.org/lightningcss-win32-arm64-msvc/-/lightningcss-win32-arm64-msvc-1.30.2.tgz",
2474
+ "integrity": "sha512-FZn+vaj7zLv//D/192WFFVA0RgHawIcHqLX9xuWiQt7P0PtdFEVaxgF9rjM/IRYHQXNnk61/H/gb2Ei+kUQ4xQ==",
2475
+ "cpu": [
2476
+ "arm64"
2477
+ ],
2478
+ "dev": true,
2479
+ "license": "MPL-2.0",
2480
+ "optional": true,
2481
+ "os": [
2482
+ "win32"
2483
+ ],
2484
+ "engines": {
2485
+ "node": ">= 12.0.0"
2486
+ },
2487
+ "funding": {
2488
+ "type": "opencollective",
2489
+ "url": "https://opencollective.com/parcel"
2490
+ }
2491
+ },
2492
+ "node_modules/@tailwindcss/vite/node_modules/lightningcss-win32-x64-msvc": {
2493
+ "version": "1.30.2",
2494
+ "resolved": "https://registry.npmjs.org/lightningcss-win32-x64-msvc/-/lightningcss-win32-x64-msvc-1.30.2.tgz",
2495
+ "integrity": "sha512-5g1yc73p+iAkid5phb4oVFMB45417DkRevRbt/El/gKXJk4jid+vPFF/AXbxn05Aky8PapwzZrdJShv5C0avjw==",
2496
+ "cpu": [
2497
+ "x64"
2498
+ ],
2499
+ "dev": true,
2500
+ "license": "MPL-2.0",
2501
+ "optional": true,
2502
+ "os": [
2503
+ "win32"
2504
+ ],
2505
+ "engines": {
2506
+ "node": ">= 12.0.0"
2507
+ },
2508
+ "funding": {
2509
+ "type": "opencollective",
2510
+ "url": "https://opencollective.com/parcel"
2511
+ }
2512
+ },
2513
+ "node_modules/@tailwindcss/vite/node_modules/tailwindcss": {
2514
+ "version": "4.1.18",
2515
+ "resolved": "https://registry.npmjs.org/tailwindcss/-/tailwindcss-4.1.18.tgz",
2516
+ "integrity": "sha512-4+Z+0yiYyEtUVCScyfHCxOYP06L5Ne+JiHhY2IjR2KWMIWhJOYZKLSGZaP5HkZ8+bY0cxfzwDE5uOmzFXyIwxw==",
2517
+ "dev": true,
2518
+ "license": "MIT"
2519
+ },
2520
  "node_modules/@types/babel__core": {
2521
  "version": "7.20.5",
2522
  "resolved": "https://registry.npmjs.org/@types/babel__core/-/babel__core-7.20.5.tgz",
 
4044
  "license": "ISC"
4045
  },
4046
  "node_modules/jiti": {
4047
+ "version": "2.6.1",
4048
+ "resolved": "https://registry.npmjs.org/jiti/-/jiti-2.6.1.tgz",
4049
+ "integrity": "sha512-ekilCSN1jwRvIbgeg/57YFh8qQDNbwDb9xT/qu2DAHbFFZUicIl4ygVaAvzveMhMVr3LnpSKTNnwt8PoOfmKhQ==",
4050
  "dev": true,
4051
  "license": "MIT",
4052
  "bin": {
 
4173
  "lightningcss-win32-x64-msvc": "1.30.1"
4174
  }
4175
  },
4176
+ "node_modules/lightningcss-android-arm64": {
4177
+ "version": "1.30.2",
4178
+ "resolved": "https://registry.npmjs.org/lightningcss-android-arm64/-/lightningcss-android-arm64-1.30.2.tgz",
4179
+ "integrity": "sha512-BH9sEdOCahSgmkVhBLeU7Hc9DWeZ1Eb6wNS6Da8igvUwAe0sqROHddIlvU06q3WyXVEOYDZ6ykBZQnjTbmo4+A==",
4180
+ "cpu": [
4181
+ "arm64"
4182
+ ],
4183
+ "dev": true,
4184
+ "license": "MPL-2.0",
4185
+ "optional": true,
4186
+ "os": [
4187
+ "android"
4188
+ ],
4189
+ "engines": {
4190
+ "node": ">= 12.0.0"
4191
+ },
4192
+ "funding": {
4193
+ "type": "opencollective",
4194
+ "url": "https://opencollective.com/parcel"
4195
+ }
4196
+ },
4197
  "node_modules/lightningcss-darwin-arm64": {
4198
  "version": "1.30.1",
4199
  "resolved": "https://registry.npmjs.org/lightningcss-darwin-arm64/-/lightningcss-darwin-arm64-1.30.1.tgz",
 
4458
  }
4459
  },
4460
  "node_modules/magic-string": {
4461
+ "version": "0.30.21",
4462
+ "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.21.tgz",
4463
+ "integrity": "sha512-vd2F4YUyEXKGcLHoq+TEyCjxueSeHnFxyyjNp80yg0XV4vUhnDer/lvvlqM/arB5bXQN5K2/3oinyCRyx8T2CQ==",
4464
  "dev": true,
4465
  "license": "MIT",
4466
  "dependencies": {
4467
+ "@jridgewell/sourcemap-codec": "^1.5.5"
4468
  }
4469
  },
4470
  "node_modules/markdown-table": {
ui/package.json CHANGED
@@ -21,6 +21,7 @@
21
  "devDependencies": {
22
  "@eslint/js": "^9.32.0",
23
  "@tailwindcss/cli": "^4.1.11",
 
24
  "@types/react": "^19.1.9",
25
  "@types/react-dom": "^19.1.7",
26
  "@vitejs/plugin-react": "^4.7.0",
 
21
  "devDependencies": {
22
  "@eslint/js": "^9.32.0",
23
  "@tailwindcss/cli": "^4.1.11",
24
+ "@tailwindcss/vite": "^4.1.18",
25
  "@types/react": "^19.1.9",
26
  "@types/react-dom": "^19.1.7",
27
  "@vitejs/plugin-react": "^4.7.0",
ui/src/App.tsx CHANGED
@@ -5,49 +5,116 @@ import React, {
5
  useRef,
6
  useState,
7
  } from "react";
8
- import { BookmarkIcon } from "@heroicons/react/24/outline";
9
- import { TrashIcon } from "@heroicons/react/24/outline";
 
 
 
 
 
 
 
 
 
 
 
 
10
  import ReactMarkdown from "react-markdown";
11
  import remarkGfm from "remark-gfm";
12
- import { SparklesIcon } from "@heroicons/react/24/outline";
13
-
14
- // const API_PATH = "/chat";
15
- // const THREAD_KEY = "lg_thread_id";
16
 
 
17
  import { useChat } from "./useChat";
18
  import type { ThreadMeta } from "./threads";
19
 
 
 
 
 
 
 
 
 
 
 
 
 
20
  const SUGGESTIONS = [
21
  {
22
- title: "Quick intro to Krishna",
23
  text: "Give me a 90-second intro to Krishna Vamsi Dhulipalla—recent work, top strengths, and impact.",
 
24
  },
25
  {
26
- title: "Get Krishna’s resume",
27
  text: "Share Krishna’s latest resume and provide a download link.",
 
28
  },
29
  {
30
- title: "What this agent can do",
31
- text: "What tools and actions can you perform for me? Show examples and how to use them.",
 
32
  },
33
  {
34
- title: "Schedule/modify a meeting",
35
- text: "Schedule a 30-minute meeting with Krishna next week and show how I can reschedule or cancel.",
 
36
  },
37
  ];
38
 
39
- // --- Helpers for message actions ---
40
- const copyToClipboard = async (text: string) => {
41
- try {
42
- await navigator.clipboard.writeText(text);
43
- } catch {
44
- console.error("Failed to copy text to clipboard");
45
- }
46
- };
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
47
 
48
- const getLastUserMessage = (msgs: { role: string; content: string }[]) =>
49
- [...msgs].reverse().find((m) => m.role === "user") || null;
50
 
 
51
  export default function App() {
52
  const {
53
  threads,
@@ -64,46 +131,65 @@ export default function App() {
64
 
65
  const [input, setInput] = useState("");
66
  const bottomRef = useRef<HTMLDivElement | null>(null);
67
-
68
  const inputRef = useRef<HTMLTextAreaElement | null>(null);
69
  const prevThreadId = useRef<string | null>(null);
70
 
71
- // Scroll on message changes
 
 
 
72
  useEffect(() => {
73
- const currentThreadId = active?.id ?? null;
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
74
 
75
- // If the thread changed, scroll instantly to bottom
 
 
76
  if (currentThreadId !== prevThreadId.current) {
77
  prevThreadId.current = currentThreadId;
78
- bottomRef.current?.scrollIntoView({ behavior: "auto" }); // instant scroll
79
  } else {
80
- // If same thread but messages changed, smooth scroll
81
  bottomRef.current?.scrollIntoView({ behavior: "smooth" });
82
  }
83
  }, [messages, active?.id]);
84
 
85
- const handleShare = async () => {
86
- const url = window.location.href;
87
- const title = document.title || "My Chat";
88
- try {
89
- if (navigator.share) {
90
- await navigator.share({ title, url });
91
- } else {
92
- await navigator.clipboard.writeText(url);
93
- // optionally toast: "Link copied"
94
- }
95
- } catch {
96
- // ignored
97
- }
98
- };
99
-
100
- const handleBookmark = () => {
101
- // Browsers don't allow programmatic bookmarks; show the right shortcut.
102
- const isMac = navigator.platform.toUpperCase().includes("MAC");
103
- const combo = isMac ? "⌘ + D" : "Ctrl + D";
104
- alert(`Press ${combo} to bookmark this page.`);
105
- };
106
-
107
  const sendMessage = useCallback(() => {
108
  const text = input.trim();
109
  if (!text || isStreaming) return;
@@ -111,21 +197,7 @@ export default function App() {
111
  setInput("");
112
  }, [input, isStreaming, send]);
113
 
114
- const selectSuggestion = useCallback((text: string) => {
115
- setInput(text);
116
- requestAnimationFrame(() => inputRef.current?.focus());
117
- }, []);
118
-
119
- const sendSuggestion = useCallback(
120
- (text: string) => {
121
- if (isStreaming) return;
122
- setInput(text);
123
- setTimeout(() => sendMessage(), 0);
124
- },
125
- [isStreaming, sendMessage]
126
- );
127
-
128
- const onKeyDown = useCallback(
129
  (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
130
  if (e.key === "Enter" && !e.shiftKey) {
131
  e.preventDefault();
@@ -135,423 +207,248 @@ export default function App() {
135
  [sendMessage]
136
  );
137
 
138
- const REGEN_PREFIX = "Regenerate this response with a different angle:\n\n";
139
 
140
- const sendPrefixed = useCallback(
141
- (prefix: string, text: string) => {
142
- if (!text || isStreaming) return;
143
- // your hook’s `send` already appends messages & streams
144
- send(`${prefix}${text}`);
145
- setInput("");
146
- },
147
- [send, isStreaming]
148
- );
149
 
150
- // Sidebar
151
- const Sidebar = useMemo(
152
- () => (
153
- <aside className="hidden md:flex w-64 shrink-0 flex-col bg-zinc-950/80 border-r border-zinc-800/60">
154
- <div className="p-4 border-b border-zinc-800/60">
155
- <h1 className="flex items-center text-zinc-100 font-semibold tracking-tight">
156
- <SparklesIcon className="h-4 w-4 text-zinc-300 mr-2" />
157
- ChatK
158
- </h1>
159
- <p className="text-xs text-zinc-400 mt-1">
160
- Chatbot ID:{" "}
161
- <span className="font-mono">
162
- {active?.id ? active.id.slice(0, 8) : "…"}{" "}
163
- </span>
164
- </p>
165
- </div>
166
- <div className="p-3 space-y-2">
167
  <button
168
- className="w-full rounded-xl bg-emerald-600 text-white hover:bg-emerald-500 px-3 py-2 text-sm"
169
  onClick={newChat}
170
- title="Start a new session"
171
  >
172
- New Chat
173
- </button>
174
- <button
175
- className="w-full rounded-xl bg-zinc-800 text-zinc-200 hover:bg-zinc-700 px-3 py-2 text-sm"
176
- onClick={clearChat}
177
- title="Clear current messages"
178
- >
179
- Clear Chat
180
  </button>
181
- {/* View Source on GitHub */}
182
- <a
183
- href="https://github.com/krishna-dhulipalla/LangGraph_ChatBot"
184
- target="_blank"
185
- rel="noopener noreferrer"
186
- className="w-full flex items-center justify-center gap-2 rounded-xl border border-zinc-800 bg-zinc-900 hover:bg-zinc-800 px-3 py-2 text-sm text-zinc-300"
187
- title="View the source code on GitHub"
188
- >
189
- {/* GitHub Icon */}
190
- <svg
191
- xmlns="http://www.w3.org/2000/svg"
192
- viewBox="0 0 24 24"
193
- fill="currentColor"
194
- className="h-4 w-4"
195
- >
196
- <path
197
- fillRule="evenodd"
198
- d="M12 0C5.37 0 0 5.37 0
199
- 12c0 5.3 3.438 9.8 8.205
200
- 11.387.6.113.82-.262.82-.58
201
- 0-.287-.01-1.045-.015-2.05-3.338.724-4.042-1.61-4.042-1.61-.546-1.385-1.333-1.754-1.333-1.754-1.09-.745.083-.73.083-.73
202
- 1.205.085 1.84 1.238 1.84 1.238
203
- 1.07 1.835 2.807 1.305 3.492.997.107-.775.418-1.305.762-1.605-2.665-.3-5.466-1.334-5.466-5.93
204
- 0-1.31.468-2.38 1.235-3.22-.124-.303-.536-1.523.117-3.176
205
- 0 0 1.008-.322 3.3 1.23a11.5 11.5
206
- 0 013.003-.404c1.018.005 2.045.138
207
- 3.003.404 2.29-1.552 3.297-1.23
208
- 3.297-1.23.655 1.653.243 2.873.12
209
- 3.176.77.84 1.233 1.91 1.233 3.22
210
- 0 4.61-2.803 5.625-5.475 5.92.43.372.823
211
- 1.102.823 2.222 0 1.606-.015 2.898-.015 3.293
212
- 0 .32.218.698.825.58C20.565 21.796 24
213
- 17.297 24 12c0-6.63-5.37-12-12-12z"
214
- clipRule="evenodd"
215
- />
216
- </svg>
217
- View Source
218
- </a>
219
  </div>
220
 
221
- {/* Thread list */}
222
- <div className="px-3 pb-3 space-y-1 overflow-y-auto">
223
- {threads.map((t: ThreadMeta) => (
224
- <div
225
- key={t.id}
226
- className={`group w-full flex items-center gap-2 px-3 py-2 rounded-lg hover:bg-zinc-800 cursor-pointer ${
227
- t.id === active?.id ? "bg-zinc-800" : ""
228
- }`}
229
- onClick={() => setActiveThread(t)}
230
- title={t.id}
231
- >
232
- <div className="flex-1 min-w-0 p-1 hover:bg-gray-100">
233
- <div className="text-sm">
234
- {t.title && t.title.length > 20
235
- ? t.title.slice(0, 20) + "..."
236
- : t.title || "Untitled"}
237
- </div>
238
- <div
239
- className="text-zinc-500 truncate"
240
- style={{ fontSize: "10px", fontStyle: "italic" }}
241
- >
242
- {new Date(t.lastAt).toLocaleString()}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
243
  </div>
244
  </div>
245
-
246
- {/* Delete button (shows on hover) */}
247
- <button
248
- type="button"
249
- className="opacity-0 group-hover:opacity-100 shrink-0 rounded-md p-1 border border-zinc-700/60 bg-zinc-900/60 hover:bg-zinc-800/80"
250
- title="Delete thread"
251
- aria-label="Delete thread"
252
- onClick={(e) => {
253
- e.stopPropagation(); // don't switch threads
254
- if (
255
- window.confirm("Delete this thread? This cannot be undone.")
256
- ) {
257
- deleteThread(t.id);
258
- }
259
- }}
260
- >
261
- <TrashIcon className="h-4 w-4 text-zinc-300" />
262
- </button>
263
- </div>
264
- ))}
265
  </div>
266
 
267
- <div className="mt-auto p-3 text-xs text-zinc-500">
268
- Tip: Press <kbd className="px-1 bg-zinc-800 rounded">Enter</kbd> to
269
- send,
270
- <span className="mx-1" />{" "}
271
- <kbd className="px-1 bg-zinc-800 rounded">Shift+Enter</kbd> for
272
- newline.
 
 
 
 
 
273
  </div>
274
  </aside>
275
- ),
276
- [active?.id, clearChat, newChat, setActiveThread, deleteThread, threads]
277
- );
278
 
279
- return (
280
- <div className="h-screen w-screen bg-[#0b0b0f] text-zinc-100 flex">
281
- {Sidebar}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
282
 
283
- {/* Main column */}
284
- <main className="flex-1 flex flex-col">
285
- {/* Header minimal */}
286
- <div className="h-12 shrink-0 flex items-center justify-between px-3 md:px-6 border-b border-zinc-800/60 bg-zinc-950/60 backdrop-blur">
287
  <div className="flex items-center gap-2">
288
- <span className="h-2.5 w-2.5 rounded-full bg-emerald-500 animate-pulse" />
289
- <span className="font-medium">Krishna’s Assistant</span>
290
- </div>
291
- <div className="text-xs text-zinc-400 md:hidden">
292
- ID: {active?.id ? active.id.slice(0, 8) : "…"}
293
- </div>
294
- <div className="ml-auto flex items-center gap-2">
295
- {/* LinkedIn */}
296
- <a
297
- href="https://www.linkedin.com/in/krishnavamsidhulipalla/"
298
- target="_blank"
299
- rel="noopener noreferrer"
300
- className="rounded-xl px-3 py-1.5 text-sm border border-zinc-800 bg-zinc-900 hover:bg-zinc-800"
301
- title="LinkedIn"
302
- >
303
- <svg
304
- xmlns="http://www.w3.org/2000/svg"
305
- viewBox="0 0 24 24"
306
- fill="currentColor"
307
- className="h-5 w-5 text-zinc-400 hover:text-zinc-200"
308
- >
309
- <path
310
- d="M19 0h-14c-2.76 0-5 2.24-5 5v14c0
311
- 2.76 2.24 5 5 5h14c2.76 0 5-2.24
312
- 5-5v-14c0-2.76-2.24-5-5-5zm-11
313
- 19h-3v-10h3v10zm-1.5-11.27c-.97
314
- 0-1.75-.79-1.75-1.76s.78-1.76
315
- 1.75-1.76 1.75.79
316
- 1.75 1.76-.78 1.76-1.75
317
- 1.76zm13.5 11.27h-3v-5.5c0-1.31-.02-3-1.83-3-1.83
318
- 0-2.12 1.43-2.12 2.9v5.6h-3v-10h2.88v1.36h.04c.4-.75
319
- 1.38-1.54 2.85-1.54 3.05 0 3.61
320
- 2.01 3.61 4.63v5.55z"
321
- />
322
- </svg>
323
- </a>
324
-
325
- {/* GitHub */}
326
- <a
327
- href="https://github.com/krishna-dhulipalla"
328
- target="_blank"
329
- rel="noopener noreferrer"
330
- className="rounded-xl px-3 py-1.5 text-sm border border-zinc-800 bg-zinc-900 hover:bg-zinc-800"
331
- title="GitHub"
332
- >
333
- <svg
334
- xmlns="http://www.w3.org/2000/svg"
335
- viewBox="0 0 24 24"
336
- fill="currentColor"
337
- className="h-5 w-5 text-zinc-400 hover:text-zinc-200"
338
- >
339
- <path
340
- fillRule="evenodd"
341
- d="M12 0C5.37 0 0 5.37 0
342
- 12c0 5.3 3.438 9.8 8.205
343
- 11.387.6.113.82-.262.82-.58
344
- 0-.287-.01-1.045-.015-2.05-3.338.724-4.042-1.61-4.042-1.61-.546-1.385-1.333-1.754-1.333-1.754-1.09-.745.083-.73.083-.73
345
- 1.205.085 1.84 1.238 1.84 1.238
346
- 1.07 1.835 2.807 1.305 3.492.997.107-.775.418-1.305.762-1.605-2.665-.3-5.466-1.334-5.466-5.93
347
- 0-1.31.468-2.38 1.235-3.22-.124-.303-.536-1.523.117-3.176
348
- 0 0 1.008-.322 3.3 1.23a11.5 11.5
349
- 0 013.003-.404c1.018.005 2.045.138
350
- 3.003.404 2.29-1.552 3.297-1.23
351
- 3.297-1.23.655 1.653.243 2.873.12
352
- 3.176.77.84 1.233 1.91 1.233 3.22
353
- 0 4.61-2.803 5.625-5.475 5.92.43.372.823
354
- 1.102.823 2.222 0 1.606-.015 2.898-.015 3.293
355
- 0 .32.218.698.825.58C20.565 21.796 24
356
- 17.297 24 12c0-6.63-5.37-12-12-12z"
357
- clipRule="evenodd"
358
- />
359
- </svg>
360
- </a>
361
- <button
362
- onClick={handleShare}
363
- className="rounded-xl px-3 py-1.5 text-sm border border-zinc-800 bg-zinc-900 hover:bg-zinc-800"
364
- title="Share"
365
- >
366
- Share
367
- </button>
368
  <button
369
- onClick={handleBookmark}
370
- className="rounded-xl px-3 py-1.5 text-sm border border-zinc-800 bg-zinc-900 hover:bg-zinc-800"
371
- title="Bookmark"
372
  >
373
- <BookmarkIcon className="h-5 w-5 text-zinc-400 hover:text-zinc-200" />
374
  </button>
375
  </div>
376
- </div>
377
 
378
- {/* Messages */}
379
- <div className="flex-1 overflow-y-auto px-3 md:px-6 py-4">
380
  {messages.length === 0 ? (
381
- <EmptyState onSelect={selectSuggestion} onSend={sendSuggestion} />
382
- ) : (
383
- <div className="mx-auto max-w-3xl space-y-3 relative">
384
- {messages.map((m, idx) => {
385
- const isAssistant = m.role === "assistant";
386
- const emptyAssistant =
387
- isAssistant && (!m.content || m.content.trim() === "");
388
- if (emptyAssistant) return null; // hide blank bubble
389
- const key = m.id ?? `m-${idx}`; // NEW stable key
390
- return (
391
- <div key={key} className={isAssistant ? "group" : undefined}>
392
- {/* bubble row */}
393
- <div
394
- className={`flex ${
395
- isAssistant ? "justify-start" : "justify-end"
396
- }`}
397
- >
398
- <div
399
- className={`max-w-[85%] md:max-w-[75%] leading-relaxed tracking-tight rounded-2xl px-4 py-3 shadow-sm ${
400
- isAssistant
401
- ? "bg-zinc-900/80 text-zinc-100 border border-zinc-800/60"
402
- : "bg-emerald-600/90 text-white"
403
- }`}
404
- >
405
- {isAssistant ? (
406
- <>
407
- <ReactMarkdown
408
- remarkPlugins={[remarkGfm]}
409
- components={{
410
- a: (props) => (
411
- <a
412
- {...props}
413
- target="_blank"
414
- rel="noreferrer"
415
- className="underline text-blue-400 hover:text-blue-600"
416
- />
417
- ),
418
- p: (props) => (
419
- <p className="mb-2 last:mb-0" {...props} />
420
- ),
421
- ul: (props) => (
422
- <ul
423
- className="list-disc list-inside mb-2 last:mb-0"
424
- {...props}
425
- />
426
- ),
427
- ol: (props) => (
428
- <ol
429
- className="list-decimal list-inside mb-2 last:mb-0"
430
- {...props}
431
- />
432
- ),
433
- li: (props) => (
434
- <li className="ml-4 mb-1" {...props} />
435
- ),
436
- code: (
437
- props: React.HTMLAttributes<HTMLElement> & {
438
- inline?: boolean;
439
- }
440
- ) => {
441
- // react-markdown v8+ passes 'node', 'inline', etc. in props, but types may not include 'inline'
442
- const {
443
- className,
444
- children,
445
- inline,
446
- ...rest
447
- } = props;
448
- const isInline = inline;
449
- return isInline ? (
450
- <code
451
- className="bg-zinc-800/80 px-1 py-0.5 rounded"
452
- {...rest}
453
- >
454
- {children}
455
- </code>
456
- ) : (
457
- <pre className="overflow-x-auto rounded-xl border border-zinc-800/60 bg-zinc-950/80 p-3 mb-2">
458
- <code className={className} {...rest}>
459
- {children}
460
- </code>
461
- </pre>
462
- );
463
- },
464
- }}
465
- >
466
- {m.content}
467
- </ReactMarkdown>
468
- </>
469
- ) : (
470
- m.content
471
- )}
472
  </div>
473
- </div>
474
-
475
- {/* actions row – only for assistant & only when not streaming */}
476
- {isAssistant && !isStreaming && (
477
- <div className="mt-1 pl-1 flex justify-start">
478
- <MsgActions
479
- content={m.content}
480
- onEdit={() => {
481
- // Prefill composer with the *last user* prompt (ChatGPT-style “Edit”)
482
- const lastUser = getLastUserMessage(messages);
483
- setInput(lastUser ? lastUser.content : m.content);
484
- requestAnimationFrame(() =>
485
- inputRef.current?.focus()
486
- );
487
- }}
488
- onRegenerate={() => {
489
- // Resend last user prompt
490
- const lastUser = getLastUserMessage(messages);
491
- if (!lastUser) return;
492
- sendPrefixed(REGEN_PREFIX, lastUser.content);
493
- }}
494
- />
495
  </div>
496
- )}
497
- </div>
498
- );
499
- })}
500
- {/* Thinking indicator (only BEFORE first token) */}
 
 
 
 
 
 
 
 
 
 
501
  {isStreaming && !hasFirstToken && (
502
- <div className="pointer-events-none relative top-3 left-0 bottom-0 translate-y-2 z-20">
503
- <TypingDots />
 
 
504
  </div>
505
  )}
506
- <div ref={bottomRef} />
507
  </div>
508
  )}
509
  </div>
510
- {/* Warm bottom glow (simple bar + blur)
511
- <div
512
- aria-hidden
513
- className="fixed inset-x-0 bottom-0 z-30 pointer-events-none"
514
- >
515
- <div className="mx-auto max-w-3xl relative h-0">
516
- <div className="absolute left-6 right-6 bottom-0 h-[6px] rounded-full bg-amber-300/40 blur-3xl" />
517
- </div>
518
- </div> */}
519
-
520
- {/* Composer */}
521
- <div className="shrink-0 border-t border-zinc-800/60 bg-zinc-950/80">
522
- <div className="mx-auto max-w-3xl p-3 md:p-4">
523
- <div className="flex gap-2 items-end">
524
- <div className="relative flex-1">
525
- <textarea
526
- ref={inputRef}
527
- className="w-full flex-1 resize-none rounded-2xl bg-zinc-900 text-zinc-100 placeholder-zinc-500 p-3 pr-8 outline-none focus:ring-2 focus:ring-emerald-500/60 min-h-[56px] max-h-48 border border-zinc-800/60"
528
- placeholder="Type a message…"
529
- value={input}
530
- onChange={(e) => setInput(e.target.value)}
531
- onKeyDown={onKeyDown}
532
- disabled={isStreaming}
533
- />
534
- {input && (
535
  <button
536
- onClick={() => {
537
- setInput("");
538
- requestAnimationFrame(() => inputRef.current?.focus());
539
- }}
540
- className="absolute right-2 top-2.5 h-6 w-6 rounded-md border border-zinc-800 bg-zinc-950/70 hover:bg-zinc-800/70 text-zinc-400"
541
- title="Clear"
542
- aria-label="Clear input"
543
  >
544
- ×
545
  </button>
546
- )}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
547
  </div>
548
- <button
549
- className="rounded-2xl px-4 py-3 bg-emerald-600 text-white hover:bg-emerald-500 disabled:opacity-50 disabled:cursor-not-allowed"
550
- onClick={sendMessage}
551
- disabled={!input.trim() || isStreaming}
552
- >
553
- Send
554
- </button>
555
  </div>
556
  </div>
557
  </div>
@@ -560,121 +457,118 @@ export default function App() {
560
  );
561
  }
562
 
563
- function MsgActions({
564
- content,
565
- onEdit,
566
- onRegenerate,
567
  }: {
568
- content: string;
569
- onEdit: () => void;
570
- onRegenerate: () => void;
571
  }) {
572
- return (
573
- <div className="mt-2 flex items-center gap-1 opacity-0 group-hover:opacity-100 transition-opacity">
574
- <button
575
- onClick={() => copyToClipboard(content)}
576
- className="text-xs px-2 py-1 rounded border border-zinc-800 bg-zinc-900 hover:bg-zinc-800"
577
- title="Copy"
578
- >
579
- Copy
580
- </button>
581
- <button
582
- onClick={onEdit}
583
- className="text-xs px-2 py-1 rounded border border-zinc-800 bg-zinc-900 hover:bg-zinc-800"
584
- title="Edit this as new prompt"
585
- >
586
- Edit
587
- </button>
588
- <button
589
- onClick={onRegenerate}
590
- className="text-xs px-2 py-1 rounded border border-zinc-800 bg-zinc-900 hover:bg-zinc-800"
591
- title="Regenerate"
592
- >
593
- Regenerate
594
- </button>
595
- </div>
596
- );
597
- }
598
 
599
- function EmptyState({
600
- onSelect,
601
- onSend,
602
- }: {
603
- onSelect: (text: string) => void;
604
- onSend: (text: string) => void;
605
- }) {
606
  return (
607
- <div className="h-full flex items-center justify-center">
608
- <div className="mx-auto max-w-3xl w-full px-3 md:px-6">
609
- <div className="text-center text-zinc-400 mb-6">
610
- <h2 className="text-xl text-zinc-200 mb-2">Ask me anything</h2>
611
- <p className="text-sm">
612
- The agent can call tools and remember the conversation (per chatbot
613
- id).
614
- </p>
 
 
615
  </div>
616
-
617
- {/* Starter prompts */}
618
- <div className="grid grid-cols-1 sm:grid-cols-2 gap-3">
619
- {SUGGESTIONS.map((s) => (
620
- <div
621
- role="button"
622
- tabIndex={0}
623
- onClick={() => onSelect(s.text)}
624
- className="group text-left rounded-2xl border border-zinc-800/60 bg-zinc-900/60 hover:bg-zinc-900/90 transition-colors p-4 shadow-sm focus:outline-none focus:ring-2 focus:ring-emerald-500/60"
625
- title="Click to prefill. Use the arrow to send."
626
- >
627
- <div className="flex items-start gap-3">
628
- <div className="shrink-0 h-8 w-8 rounded-xl bg-zinc-800/80 flex items-center justify-center">
629
- <span className="text-sm text-zinc-300">💡</span>
630
- </div>
631
- <div className="flex-1">
632
- <div className="text-sm font-medium text-zinc-200">
633
- {s.title}
634
- </div>
635
- <div className="text-xs text-zinc-400 mt-1 line-clamp-2">
636
- {s.text}
637
- </div>
638
- </div>
639
- <button
640
- type="button"
641
- aria-label="Send this suggestion"
642
- onClick={(e) => {
643
- e.stopPropagation();
644
- onSend(s.text);
645
- }}
646
- className="shrink-0 rounded-xl border border-zinc-700/60 bg-zinc-950/60 px-2 py-2 hover:bg-zinc-900/80"
647
- title="Send now"
648
- >
649
- {/* Arrow icon */}
650
- <svg
651
- viewBox="0 0 24 24"
652
- className="h-4 w-4 text-zinc-300 group-hover:text-emerald-400"
653
- fill="none"
654
- stroke="currentColor"
655
- strokeWidth={2}
656
- strokeLinecap="round"
657
- strokeLinejoin="round"
658
  >
659
- <path d="M7 17L17 7M7 7h10v10" />
660
- </svg>
661
- </button>
662
- </div>
663
- </div>
664
- ))}
665
- </div>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
666
  </div>
667
  </div>
668
  );
669
  }
670
 
671
- function TypingDots() {
 
 
 
 
 
 
 
 
672
  return (
673
- <span className="inline-flex items-center gap-1 align-middle">
674
- <span className="sr-only">Assistant is typing…</span>
675
- <span className="h-1.5 w-1.5 rounded-full bg-zinc-300 animate-bounce [animation-delay:-0.2s]" />
676
- <span className="h-1.5 w-1.5 rounded-full bg-zinc-300 animate-bounce" />
677
- <span className="h-1.5 w-1.5 rounded-full bg-zinc-300 animate-bounce [animation-delay:0.2s]" />
678
- </span>
679
  );
680
  }
 
5
  useRef,
6
  useState,
7
  } from "react";
8
+ import {
9
+ BookmarkIcon,
10
+ TrashIcon,
11
+ SparklesIcon,
12
+ PaperClipIcon,
13
+ MicrophoneIcon,
14
+ PaperAirplaneIcon,
15
+ ArrowPathIcon,
16
+ DocumentDuplicateIcon,
17
+ PencilSquareIcon,
18
+ ChatBubbleLeftRightIcon,
19
+ XMarkIcon,
20
+ CommandLineIcon,
21
+ } from "@heroicons/react/24/outline";
22
  import ReactMarkdown from "react-markdown";
23
  import remarkGfm from "remark-gfm";
 
 
 
 
24
 
25
+ // Internal hooks/types
26
  import { useChat } from "./useChat";
27
  import type { ThreadMeta } from "./threads";
28
 
29
+ // --- Constants & Config ---
30
+ const APP_TITLE = "Krishna's Digital Twin";
31
+ const BOT_AVATAR =
32
+ "https://api.dicebear.com/9.x/bottts-neutral/svg?seed=Krishna1&backgroundColor=6366f1"; // Cleaner 3D-ish Robot
33
+
34
+ const DID_YOU_KNOW = [
35
+ "Krishna achieved a 3.95 GPA during his M.S. at Virginia Tech.",
36
+ "Krishna built an agent that automates Android UI tasks with 80%+ accuracy.",
37
+ "Krishna optimized genomic ETL pipelines reducing runtime by 70%.",
38
+ "Krishna specializes in building autonomous agents and RAG systems.",
39
+ ];
40
+
41
  const SUGGESTIONS = [
42
  {
43
+ title: "Summarize Experience",
44
  text: "Give me a 90-second intro to Krishna Vamsi Dhulipalla—recent work, top strengths, and impact.",
45
+ icon: "✨",
46
  },
47
  {
48
+ title: "Download Resume",
49
  text: "Share Krishna’s latest resume and provide a download link.",
50
+ icon: "📄",
51
  },
52
  {
53
+ title: "Capabilities",
54
+ text: "What tools and actions can you perform for me?",
55
+ icon: "🛠️",
56
  },
57
  {
58
+ title: "Schedule Meeting",
59
+ text: "Schedule a 30-minute meeting with Krishna next week.",
60
+ icon: "📅",
61
  },
62
  ];
63
 
64
+ function DidYouKnowRotator() {
65
+ const [index, setIndex] = useState(0);
66
+ useEffect(() => {
67
+ const interval = setInterval(() => {
68
+ setIndex((prev) => (prev + 1) % DID_YOU_KNOW.length);
69
+ }, 5000); // Rotate every 5 seconds
70
+ return () => clearInterval(interval);
71
+ }, []);
72
+
73
+ return (
74
+ <div className="mt-8 p-4 rounded-xl bg-white/5 border border-white/5 max-w-lg mx-auto text-center animate-in fade-in slide-in-from-bottom-2 duration-700">
75
+ <p className="text-[10px] uppercase tracking-widest text-zinc-500 font-semibold mb-2">
76
+ Did you know?
77
+ </p>
78
+ <p
79
+ key={index}
80
+ className="text-sm text-zinc-300 italic min-h-[20px] transition-all duration-500"
81
+ >
82
+ "{DID_YOU_KNOW[index]}"
83
+ </p>
84
+ </div>
85
+ );
86
+ }
87
+
88
+ // --- Helper: Date Grouping for Sidebar ---
89
+ function groupThreadsByDate(threads: ThreadMeta[]) {
90
+ const today = new Date();
91
+ const yesterday = new Date();
92
+ yesterday.setDate(yesterday.getDate() - 1);
93
+
94
+ const groups: Record<string, ThreadMeta[]> = {
95
+ Today: [],
96
+ Yesterday: [],
97
+ "Previous 7 Days": [],
98
+ Older: [],
99
+ };
100
+
101
+ threads.forEach((t) => {
102
+ const d = new Date(t.lastAt);
103
+ if (d.toDateString() === today.toDateString()) {
104
+ groups["Today"].push(t);
105
+ } else if (d.toDateString() === yesterday.toDateString()) {
106
+ groups["Yesterday"].push(t);
107
+ } else if (d.getTime() > today.getTime() - 7 * 24 * 60 * 60 * 1000) {
108
+ groups["Previous 7 Days"].push(t);
109
+ } else {
110
+ groups["Older"].push(t);
111
+ }
112
+ });
113
 
114
+ return groups;
115
+ }
116
 
117
+ // --- Main Component ---
118
  export default function App() {
119
  const {
120
  threads,
 
131
 
132
  const [input, setInput] = useState("");
133
  const bottomRef = useRef<HTMLDivElement | null>(null);
 
134
  const inputRef = useRef<HTMLTextAreaElement | null>(null);
135
  const prevThreadId = useRef<string | null>(null);
136
 
137
+ // --- Voice Input Logic ---
138
+ const [isListening, setIsListening] = useState(false);
139
+ const recognitionRef = useRef<any>(null);
140
+
141
  useEffect(() => {
142
+ const SpeechRecognition =
143
+ (window as any).SpeechRecognition ||
144
+ (window as any).webkitSpeechRecognition;
145
+ if (SpeechRecognition) {
146
+ recognitionRef.current = new SpeechRecognition();
147
+ recognitionRef.current.continuous = false;
148
+ recognitionRef.current.interimResults = false;
149
+ recognitionRef.current.lang = "en-US";
150
+
151
+ recognitionRef.current.onresult = (event: any) => {
152
+ const transcript = event.results[0][0].transcript;
153
+ setInput((prev) => (prev ? prev + " " + transcript : transcript));
154
+ setIsListening(false);
155
+ };
156
+
157
+ recognitionRef.current.onerror = (event: any) => {
158
+ console.error("Speech recognition error", event.error);
159
+ setIsListening(false);
160
+ };
161
+
162
+ recognitionRef.current.onend = () => {
163
+ setIsListening(false);
164
+ };
165
+ }
166
+ }, []);
167
+
168
+ const toggleListening = useCallback(() => {
169
+ if (!recognitionRef.current) {
170
+ alert("Browser does not support Speech Recognition");
171
+ return;
172
+ }
173
+ if (isListening) {
174
+ recognitionRef.current.stop();
175
+ setIsListening(false);
176
+ } else {
177
+ recognitionRef.current.start();
178
+ setIsListening(true);
179
+ }
180
+ }, [isListening]);
181
 
182
+ // Auto-scroll logic
183
+ useEffect(() => {
184
+ const currentThreadId = active?.id ?? null;
185
  if (currentThreadId !== prevThreadId.current) {
186
  prevThreadId.current = currentThreadId;
187
+ bottomRef.current?.scrollIntoView({ behavior: "auto" });
188
  } else {
 
189
  bottomRef.current?.scrollIntoView({ behavior: "smooth" });
190
  }
191
  }, [messages, active?.id]);
192
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
193
  const sendMessage = useCallback(() => {
194
  const text = input.trim();
195
  if (!text || isStreaming) return;
 
197
  setInput("");
198
  }, [input, isStreaming, send]);
199
 
200
+ const handleKeyDown = useCallback(
 
 
 
 
 
 
 
 
 
 
 
 
 
 
201
  (e: React.KeyboardEvent<HTMLTextAreaElement>) => {
202
  if (e.key === "Enter" && !e.shiftKey) {
203
  e.preventDefault();
 
207
  [sendMessage]
208
  );
209
 
210
+ const groupedThreads = useMemo(() => groupThreadsByDate(threads), [threads]);
211
 
212
+ return (
213
+ <div className="flex h-screen w-screen bg-[#05050A] text-zinc-100 font-sans selection:bg-indigo-500/30">
214
+ {/* Background Ambience */}
215
+ <div className="fixed inset-0 z-0 pointer-events-none">
216
+ <div className="absolute top-[-10%] right-[-5%] w-[500px] h-[500px] bg-indigo-900/20 rounded-full blur-[128px]" />
217
+ <div className="absolute bottom-[-10%] left-[-10%] w-[600px] h-[600px] bg-purple-900/10 rounded-full blur-[128px]" />
218
+ </div>
 
 
219
 
220
+ {/* --- Sidebar --- */}
221
+ <aside className="hidden md:flex flex-col w-[280px] z-10 bg-zinc-950/40 backdrop-blur-xl border-r border-white/5 transition-all">
222
+ {/* Header */}
223
+ <div className="p-4 border-b border-white/5">
 
 
 
 
 
 
 
 
 
 
 
 
 
224
  <button
 
225
  onClick={newChat}
226
+ className="group flex items-center gap-3 w-full px-3 py-2.5 rounded-xl bg-white/5 hover:bg-white/10 border border-white/5 transition-all duration-200"
227
  >
228
+ <div className="p-1.5 rounded-lg bg-indigo-500/10 text-indigo-400 group-hover:text-indigo-300">
229
+ <SparklesIcon className="w-5 h-5" />
230
+ </div>
231
+ <span className="text-sm font-medium text-zinc-200">New Chat</span>
 
 
 
 
232
  </button>
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
233
  </div>
234
 
235
+ {/* History List */}
236
+ <div className="flex-1 overflow-y-auto p-3 space-y-6 scrollbar-thin scrollbar-thumb-zinc-800">
237
+ {Object.entries(groupedThreads).map(([label, group]) => {
238
+ if (group.length === 0) return null;
239
+ return (
240
+ <div key={label}>
241
+ <h3 className="px-3 mb-2 text-[11px] font-semibold uppercase tracking-wider text-zinc-500">
242
+ {label}
243
+ </h3>
244
+ <div className="space-y-0.5">
245
+ {group.map((t) => (
246
+ <div
247
+ key={t.id}
248
+ onClick={() => setActiveThread(t)}
249
+ className={`group relative flex items-center gap-2 px-3 py-2 rounded-lg cursor-pointer transition-colors ${
250
+ active?.id === t.id
251
+ ? "bg-white/10 text-zinc-100"
252
+ : "text-zinc-400 hover:bg-white/5 hover:text-zinc-200"
253
+ }`}
254
+ >
255
+ <span
256
+ className="text-sm flex-1 line-clamp-1 break-all"
257
+ title={t.title || "New Conversation"}
258
+ >
259
+ {t.title || "New Conversation"}
260
+ </span>
261
+ {/* Delete Action */}
262
+ <button
263
+ onClick={(e) => {
264
+ e.stopPropagation();
265
+ if (window.confirm("Delete thread?"))
266
+ deleteThread(t.id);
267
+ }}
268
+ className="opacity-0 group-hover:opacity-100 p-1 rounded hover:bg-red-500/20 text-zinc-500 hover:text-red-400 transition-all"
269
+ >
270
+ <TrashIcon className="w-3.5 h-3.5" />
271
+ </button>
272
+ </div>
273
+ ))}
274
  </div>
275
  </div>
276
+ );
277
+ })}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
278
  </div>
279
 
280
+ {/* Sidebar Footer */}
281
+ <div className="p-4 border-t border-white/5">
282
+ <a
283
+ href="https://github.com/krishna-dhulipalla/LangGraph_ChatBot"
284
+ target="_blank"
285
+ rel="noreferrer"
286
+ className="flex items-center gap-2 text-xs text-zinc-500 hover:text-zinc-300 transition-colors"
287
+ >
288
+ <CommandLineIcon className="w-4 h-4" />
289
+ <span>View Source Code</span>
290
+ </a>
291
  </div>
292
  </aside>
 
 
 
293
 
294
+ {/* --- Main Chat Area --- */}
295
+ <main className="relative z-10 flex-1 flex flex-col h-full overflow-hidden">
296
+ {/* Top Navigation */}
297
+ <header className="h-16 flex items-center justify-between px-6 border-b border-white/5 bg-zinc-950/20 backdrop-blur-sm">
298
+ <div className="flex items-center gap-3">
299
+ <div className="relative">
300
+ <img
301
+ src={BOT_AVATAR}
302
+ alt="Bot"
303
+ className="w-9 h-9 rounded-full ring-2 ring-indigo-500/20 shadow-lg shadow-indigo-500/10"
304
+ />
305
+ <span className="absolute bottom-0 right-0 w-2.5 h-2.5 bg-emerald-500 border-2 border-[#05050A] rounded-full"></span>
306
+ </div>
307
+ <div>
308
+ <h1 className="text-sm font-semibold text-zinc-100">
309
+ {APP_TITLE}
310
+ </h1>
311
+ <p className="text-[10px] text-zinc-400 flex items-center gap-1">
312
+ <span className="inline-block w-1 h-1 rounded-full bg-indigo-500 animate-pulse" />
313
+ Online & Ready
314
+ </p>
315
+ </div>
316
+ </div>
317
 
 
 
 
 
318
  <div className="flex items-center gap-2">
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
319
  <button
320
+ onClick={clearChat}
321
+ className="p-2 text-zinc-400 hover:text-zinc-200 hover:bg-white/5 rounded-full transition-all"
322
+ title="Clear Chat"
323
  >
324
+ <ArrowPathIcon className="w-5 h-5" />
325
  </button>
326
  </div>
327
+ </header>
328
 
329
+ {/* Chat Stream */}
330
+ <div className="flex-1 overflow-y-auto p-4 md:p-8 scroll-smooth">
331
  {messages.length === 0 ? (
332
+ <div className="h-full flex flex-col items-center justify-center max-w-2xl mx-auto animate-in fade-in duration-500">
333
+ <div className="mb-8 p-4 rounded-full bg-white/5 ring-1 ring-white/10 shadow-2xl shadow-indigo-500/10">
334
+ <ChatBubbleLeftRightIcon className="w-10 h-10 text-indigo-400" />
335
+ </div>
336
+ <h2 className="text-2xl font-semibold text-transparent bg-clip-text bg-gradient-to-br from-zinc-100 to-zinc-500 mb-2">
337
+ How can I help you today?
338
+ </h2>
339
+ <p className="text-zinc-400 text-sm mb-6 max-w-md text-center">
340
+ I'm Krishna's digital twin. Ask me about his architecture
341
+ skills, recent projects, or schedule a meeting.
342
+ </p>
343
+
344
+ {/* <DidYouKnowRotator /> */}
345
+ {/* <div className="h-8"></div> */}
346
+
347
+ {/* Suggestions Grid */}
348
+ <div className="grid grid-cols-1 md:grid-cols-2 gap-3 w-full">
349
+ {SUGGESTIONS.map((s, i) => (
350
+ <button
351
+ key={i}
352
+ onClick={() => {
353
+ // Just send immediately without prefilling input state visually
354
+ if (!isStreaming) send(s.text);
355
+ }}
356
+ className="group flex items-start gap-4 p-4 rounded-2xl bg-white/5 hover:bg-white/10 border border-white/5 hover:border-white/10 text-left transition-all hover:-translate-y-0.5 hover:shadow-lg hover:shadow-indigo-500/10"
357
+ >
358
+ <span className="text-xl grayscale group-hover:grayscale-0 transition-all">
359
+ {s.icon}
360
+ </span>
361
+ <div>
362
+ <div className="text-sm font-medium text-zinc-200 group-hover:text-indigo-300 transition-colors">
363
+ {s.title}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
364
  </div>
365
+ <div className="text-xs text-zinc-500 mt-1 line-clamp-1 group-hover:text-zinc-400 transition-colors">
366
+ {s.text}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
367
  </div>
368
+ </div>
369
+ </button>
370
+ ))}
371
+ </div>
372
+ </div>
373
+ ) : (
374
+ <div className="max-w-3xl mx-auto space-y-6">
375
+ {messages.map((m, idx) => (
376
+ <Bubble
377
+ key={m.id || idx}
378
+ message={m}
379
+ isStreaming={isStreaming && idx === messages.length - 1}
380
+ />
381
+ ))}
382
+ {/* Streaming Indicator */}
383
  {isStreaming && !hasFirstToken && (
384
+ <div className="flex gap-1 ml-14">
385
+ <span className="w-2 h-2 rounded-full bg-zinc-600 animate-bounce" />
386
+ <span className="w-2 h-2 rounded-full bg-zinc-600 animate-bounce [animation-delay:0.1s]" />
387
+ <span className="w-2 h-2 rounded-full bg-zinc-600 animate-bounce [animation-delay:0.2s]" />
388
  </div>
389
  )}
390
+ <div ref={bottomRef} className="h-4" />
391
  </div>
392
  )}
393
  </div>
394
+
395
+ {/* --- Input Area --- */}
396
+ <div className="p-4 md:p-6 pb-8">
397
+ <div className="max-w-3xl mx-auto relative">
398
+ <div className="relative flex flex-col gap-2 p-2 rounded-3xl bg-zinc-900/50 backdrop-blur-xl border border-white/10 shadow-2xl focus-within:ring-1 focus-within:ring-indigo-500/50 transition-all">
399
+ <textarea
400
+ ref={inputRef}
401
+ value={input}
402
+ onChange={(e) => setInput(e.target.value)}
403
+ onKeyDown={handleKeyDown}
404
+ placeholder="Type a message..."
405
+ className="w-full bg-transparent text-zinc-100 placeholder-zinc-500 px-4 py-3 min-h-[50px] max-h-[200px] resize-none outline-none text-[15px] leading-relaxed scrollbar-hide"
406
+ rows={1}
407
+ />
408
+
409
+ {/* Input Actions Bar */}
410
+ <div className="flex items-center justify-between px-2 pb-1">
411
+ <div className="flex items-center gap-1">
 
 
 
 
 
 
 
412
  <button
413
+ className="cursor-not-allowed opacity-50 p-2 rounded-xl text-zinc-400 hover:text-zinc-200 hover:bg-white/5 transition-colors"
414
+ title="Attach file (coming soon)"
 
 
 
 
 
415
  >
416
+ <PaperClipIcon className="w-5 h-5" />
417
  </button>
418
+ <button
419
+ onClick={toggleListening}
420
+ className={`p-2 rounded-xl transition-all duration-300 ${
421
+ isListening
422
+ ? "text-red-400 bg-red-500/10 animate-pulse ring-1 ring-red-500/20"
423
+ : "text-zinc-400 hover:text-zinc-200 hover:bg-white/5"
424
+ }`}
425
+ title={isListening ? "Stop Listening" : "Voice Input"}
426
+ >
427
+ <MicrophoneIcon className="w-5 h-5" />
428
+ </button>
429
+ </div>
430
+
431
+ <button
432
+ onClick={sendMessage}
433
+ disabled={!input.trim() || isStreaming}
434
+ className={`p-2 rounded-xl flex items-center gap-2 transition-all ${
435
+ input.trim() && !isStreaming
436
+ ? "bg-indigo-600 text-white shadow-lg shadow-indigo-500/20 hover:bg-indigo-500"
437
+ : "bg-zinc-800 text-zinc-500 cursor-not-allowed"
438
+ }`}
439
+ >
440
+ {isStreaming ? (
441
+ <span className="w-5 h-5 border-2 border-white/20 border-t-white rounded-full animate-spin" />
442
+ ) : (
443
+ <PaperAirplaneIcon className="w-5 h-5 -ml-0.5 transform -rotate-45 translate-x-0.5" />
444
+ )}
445
+ </button>
446
  </div>
447
+ </div>
448
+ <div className="text-center mt-3">
449
+ <p className="text-[10px] text-zinc-600">
450
+ AI can make mistakes. Please verify important information.
451
+ </p>
 
 
452
  </div>
453
  </div>
454
  </div>
 
457
  );
458
  }
459
 
460
+ // --- Message Bubble Component ---
461
+ function Bubble({
462
+ message,
463
+ isStreaming,
464
  }: {
465
+ message: any;
466
+ isStreaming: boolean;
 
467
  }) {
468
+ const isUser = message.role === "user";
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
469
 
 
 
 
 
 
 
 
470
  return (
471
+ <div
472
+ className={`flex gap-4 ${isUser ? "justify-end" : "justify-start group"}`}
473
+ >
474
+ {!isUser && (
475
+ <div className="shrink-0 flex flex-col gap-2">
476
+ <img
477
+ src={BOT_AVATAR}
478
+ alt="AI"
479
+ className="w-8 h-8 rounded-full ring-1 ring-white/10"
480
+ />
481
  </div>
482
+ )}
483
+
484
+ <div
485
+ className={`relative max-w-[85%] md:max-w-[75%] rounded-2xl px-5 py-3.5 shadow-sm text-[15px] leading-7 ${
486
+ isUser
487
+ ? "bg-gradient-to-br from-indigo-600 to-violet-600 text-white rounded-br-sm shadow-md shadow-indigo-500/10"
488
+ : "bg-white/5 border border-white/5 text-zinc-100 rounded-bl-sm backdrop-blur-md"
489
+ }`}
490
+ >
491
+ <ReactMarkdown
492
+ remarkPlugins={[remarkGfm]}
493
+ components={{
494
+ a: ({ ...props }) => (
495
+ <a
496
+ {...props}
497
+ className="text-blue-400 hover:underline"
498
+ target="_blank"
499
+ />
500
+ ),
501
+ code: ({ inline, className, children, ...props }: any) => {
502
+ if (inline)
503
+ return (
504
+ <code
505
+ className="bg-white/10 px-1 py-0.5 rounded font-mono text-sm"
506
+ {...props}
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
507
  >
508
+ {children}
509
+ </code>
510
+ );
511
+ return (
512
+ <pre className="bg-zinc-950/50 p-3 rounded-xl border border-white/5 overflow-x-auto my-2 text-sm">
513
+ <code className={className} {...props}>
514
+ {children}
515
+ </code>
516
+ </pre>
517
+ );
518
+ },
519
+ ul: (props) => (
520
+ <ul
521
+ className="list-disc list-inside ml-2 space-y-1 my-2"
522
+ {...props}
523
+ />
524
+ ),
525
+ ol: (props) => (
526
+ <ol
527
+ className="list-decimal list-inside ml-2 space-y-1 my-2"
528
+ {...props}
529
+ />
530
+ ),
531
+ p: (props) => <p className="mb-2 last:mb-0" {...props} />,
532
+ }}
533
+ >
534
+ {message.content}
535
+ </ReactMarkdown>
536
+
537
+ {/* Actions for Assistant */}
538
+ {!isUser && !isStreaming && (
539
+ <div className="absolute -bottom-6 left-0 opacity-0 group-hover:opacity-100 transition-opacity flex items-center gap-2">
540
+ <ActionButton
541
+ icon={<DocumentDuplicateIcon className="w-3.5 h-3.5" />}
542
+ label="Copy"
543
+ onClick={() => navigator.clipboard.writeText(message.content)}
544
+ />
545
+ <ActionButton
546
+ icon={<ArrowPathIcon className="w-3.5 h-3.5" />}
547
+ label="Regenerate"
548
+ onClick={() => {}}
549
+ />
550
+ </div>
551
+ )}
552
  </div>
553
  </div>
554
  );
555
  }
556
 
557
// Small pill-shaped hover action (e.g. Copy / Regenerate) rendered under an
// assistant bubble. Purely presentational; behavior comes from `onClick`.
function ActionButton({
  icon,
  label,
  onClick,
}: {
  icon: any; // a pre-sized icon element (e.g. <DocumentDuplicateIcon className="w-3.5 h-3.5" />)
  label: string;
  onClick: () => void;
}) {
  const pillClasses =
    "flex items-center gap-1 text-[10px] text-zinc-500 hover:text-zinc-300 bg-white/5 px-2 py-1 rounded-md transition-colors";

  return (
    <button onClick={onClick} className={pillClasses}>
      {icon} {label}
    </button>
  );
}
ui/src/index.css CHANGED
@@ -1 +1,19 @@
1
  @import "tailwindcss";
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
  @import "tailwindcss";
2
+
3
@layer utilities {
  /* Slim custom scrollbar for the dark theme (WebKit/Blink engines only;
     Firefox ignores ::-webkit-scrollbar pseudo-elements). */
  ::-webkit-scrollbar {
    width: 6px;
    height: 6px;
  }

  ::-webkit-scrollbar-track {
    background: transparent;
  }

  /* Thumb uses zinc-700, lightening to zinc-600 on hover. */
  ::-webkit-scrollbar-thumb {
    background: #3f3f46;
    border-radius: 10px;
  }

  ::-webkit-scrollbar-thumb:hover {
    background: #52525b;
  }
}
ui/src/main.tsx CHANGED
@@ -2,7 +2,6 @@ import { StrictMode } from "react";
2
  import { createRoot } from "react-dom/client";
3
  import "./index.css";
4
  import App from "./App.tsx";
5
- import "./tailwind.css";
6
 
7
  createRoot(document.getElementById("root")!).render(
8
  <StrictMode>
 
2
  import { createRoot } from "react-dom/client";
3
  import "./index.css";
4
  import App from "./App.tsx";
 
5
 
6
  createRoot(document.getElementById("root")!).render(
7
  <StrictMode>
ui/vite.config.ts CHANGED
@@ -1,8 +1,10 @@
1
  import { defineConfig } from "vite";
2
  import react from "@vitejs/plugin-react";
 
 
3
  export default defineConfig({
4
- plugins: [react()],
5
  server: {
6
- proxy: { "/chat": { target: "http://localhost:8000", changeOrigin: true } },
7
  },
8
  });
 
1
  import { defineConfig } from "vite";
2
  import react from "@vitejs/plugin-react";
3
+ import tailwindcss from "@tailwindcss/vite";
4
+
5
  export default defineConfig({
6
+ plugins: [react(), tailwindcss()],
7
  server: {
8
+ proxy: { "/chat": { target: "http://127.0.0.1:8000", changeOrigin: true } },
9
  },
10
  });