mishrabp commited on
Commit
58629c3
·
verified ·
1 Parent(s): dda2bf1

Upload folder using huggingface_hub

Browse files
Files changed (36) hide show
  1. .gitattributes +1 -0
  2. Dockerfile +35 -36
  3. README.md +1 -1
  4. pyproject.toml +13 -4
  5. run.py +26 -6
  6. src/interview-assistant/Dockerfile +35 -36
  7. src/interview-assistant/README.md +1 -1
  8. src/interview-assistant/agents/__init__.py +0 -0
  9. src/interview-assistant/agents/definitions.py +166 -0
  10. src/interview-assistant/app.py +616 -196
  11. src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/data_level0.bin +3 -0
  12. src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/header.bin +3 -0
  13. src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/length.bin +3 -0
  14. src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/link_lists.bin +0 -0
  15. src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/data_level0.bin +3 -0
  16. src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/header.bin +3 -0
  17. src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/length.bin +3 -0
  18. src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/link_lists.bin +0 -0
  19. src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/data_level0.bin +3 -0
  20. src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/header.bin +3 -0
  21. src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/length.bin +3 -0
  22. src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/link_lists.bin +0 -0
  23. src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/data_level0.bin +3 -0
  24. src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/header.bin +3 -0
  25. src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/length.bin +3 -0
  26. src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/link_lists.bin +0 -0
  27. src/interview-assistant/data/interview_rag_db/chroma.sqlite3 +3 -0
  28. src/interview-assistant/data/interview_state.db +0 -0
  29. src/interview-assistant/implementation.md +115 -0
  30. src/interview-assistant/pdf_utils.py +53 -0
  31. src/interview-assistant/rag/db.py +94 -0
  32. src/interview-assistant/rag/ingest.py +53 -0
  33. src/interview-assistant/teams/evaluation_team.py +72 -0
  34. src/interview-assistant/teams/interview_team.py +103 -0
  35. src/interview-assistant/tools/rag_tools.py +14 -0
  36. uv.lock +105 -39
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ src/interview-assistant/data/interview_rag_db/chroma.sqlite3 filter=lfs diff=lfs merge=lfs -text
Dockerfile CHANGED
@@ -1,36 +1,35 @@
1
- FROM python:3.12-slim
2
-
3
- ENV PYTHONUNBUFFERED=1 \
4
- DEBIAN_FRONTEND=noninteractive \
5
- PYTHONPATH=/app/src/interview-assistant:$PYTHONPATH
6
-
7
- WORKDIR /app
8
-
9
- # System deps
10
- RUN apt-get update && apt-get install -y \
11
- git build-essential curl \
12
- && rm -rf /var/lib/apt/lists/*
13
-
14
- # Install uv
15
- RUN curl -LsSf https://astral.sh/uv/install.sh | sh
16
- ENV PATH="/root/.local/bin:$PATH"
17
-
18
- # Copy project metadata
19
- COPY pyproject.toml .
20
- COPY uv.lock .
21
-
22
- # Copy application code
23
- COPY common/ ./common/
24
- COPY src/interview-assistant/ ./src/interview-assistant/
25
-
26
- # Install dependencies using uv, then export and install with pip to system
27
- # We use --no-dev to exclude dev dependencies if any
28
- RUN uv sync --frozen --no-dev && \
29
- uv pip install -e . --system
30
-
31
- # Copy entry point
32
- COPY run.py .
33
-
34
- EXPOSE 7860
35
-
36
- CMD ["python", "run.py", "interview-assistant", "--port", "7860"]
 
1
+ FROM python:3.12-slim
2
+
3
+ ENV PYTHONUNBUFFERED=1 \
4
+ DEBIAN_FRONTEND=noninteractive \
5
+ PYTHONPATH=/app:/app/common:$PYTHONPATH
6
+
7
+ WORKDIR /app
8
+
9
+ # System deps
10
+ RUN apt-get update && apt-get install -y \
11
+ git build-essential curl \
12
+ && rm -rf /var/lib/apt/lists/*
13
+
14
+ # Install uv
15
+ RUN curl -LsSf https://astral.sh/uv/install.sh | sh
16
+ ENV PATH="/root/.local/bin:$PATH"
17
+
18
+ # Copy project metadata
19
+ COPY pyproject.toml .
20
+ COPY uv.lock .
21
+
22
+ # Copy required folders
23
+ COPY common/ ./common/
24
+ COPY src/interview-assistant/ ./src/interview-assistant/
25
+
26
+ # Install dependencies using uv, then export and install with pip to system
27
+ RUN uv sync --frozen --no-dev && \
28
+ uv pip install -e . --system
29
+
30
+ # Copy entry point
31
+ COPY run.py .
32
+
33
+ EXPOSE 7860
34
+
35
+ CMD ["python", "run.py", "interview-assistant", "--port", "7860"]
 
README.md CHANGED
@@ -65,4 +65,4 @@ docker build -t interviewer-assistant -f src/interviewer-assistant/Dockerfile .
65
 
66
  # Run
67
  docker run -p 7860:7860 interviewer-assistant
68
- ```
 
65
 
66
  # Run
67
  docker run -p 7860:7860 interviewer-assistant
68
+ ```
pyproject.toml CHANGED
@@ -1,5 +1,5 @@
1
  [project]
2
- name = "agents"
3
  version = "0.1.0"
4
  description = "Agentic AI project"
5
  readme = "README.md"
@@ -47,7 +47,7 @@ dependencies = [
47
  # VECTOR DB / INDEXING
48
  # =======================
49
  "faiss-cpu>=1.13.0",
50
- "chromadb==1.3.5",
51
  "sentence-transformers>=5.1.2",
52
  "pymupdf",
53
  "pypdf>=6.3.0",
@@ -120,7 +120,7 @@ dependencies = [
120
  # MACHINE LEARNING
121
  # =======================
122
  "scikit-learn>=1.7.2",
123
- "huggingface_hub<=1.1.4",
124
  "datasets>=4.4.1",
125
 
126
  # =======================
@@ -143,6 +143,15 @@ dependencies = [
143
  "opentelemetry-sdk>=1.20.0",
144
  "opentelemetry-exporter-otlp>=1.20.0",
145
  "opentelemetry-api>=1.20.0",
 
 
 
 
 
 
 
 
 
146
  ]
147
 
148
  [dependency-groups]
@@ -181,4 +190,4 @@ include = ["common*", "src*"] # Treat 'common' and 'src' folders as packages
181
  # just like the apps do locally, preventing ModuleNotFoundError.
182
  pythonpath = ["src", "common"]
183
  testpaths = ["tests"] # Only look for tests in the 'tests' directory
184
- addopts = "-q" # Run in quiet mode (less verbose output)
 
1
  [project]
2
+ name = "agenticai"
3
  version = "0.1.0"
4
  description = "Agentic AI project"
5
  readme = "README.md"
 
47
  # VECTOR DB / INDEXING
48
  # =======================
49
  "faiss-cpu>=1.13.0",
50
+ "chromadb==1.4.1",
51
  "sentence-transformers>=5.1.2",
52
  "pymupdf",
53
  "pypdf>=6.3.0",
 
120
  # MACHINE LEARNING
121
  # =======================
122
  "scikit-learn>=1.7.2",
123
+ "huggingface_hub<=1.3.2",
124
  "datasets>=4.4.1",
125
 
126
  # =======================
 
143
  "opentelemetry-sdk>=1.20.0",
144
  "opentelemetry-exporter-otlp>=1.20.0",
145
  "opentelemetry-api>=1.20.0",
146
+
147
+ # =======================
148
+ # Google Authentication
149
+ # =======================
150
+ "google-auth>=2.22.0",
151
+ "google-auth-oauthlib>=0.4.6",
152
+ "google-auth-httplib2>=0.1.0",
153
+ "autoflake>=1.5.0",
154
+
155
  ]
156
 
157
  [dependency-groups]
 
190
  # just like the apps do locally, preventing ModuleNotFoundError.
191
  pythonpath = ["src", "common"]
192
  testpaths = ["tests"] # Only look for tests in the 'tests' directory
193
+ addopts = "-q" # Run in quiet mode (less verbose output)
run.py CHANGED
@@ -33,8 +33,13 @@ APP_REGISTRY: Dict[str, Dict[str, str]] = {
33
  "entry": "app.py",
34
  "description": "Healthcare Assistant - Medical information with RAG and web search"
35
  },
36
- "deep-research": {
37
- "path": "src/deep-research",
 
 
 
 
 
38
  "entry": "app.py",
39
  "description": "Deep Research AI - Comprehensive research assistant"
40
  },
@@ -53,13 +58,23 @@ APP_REGISTRY: Dict[str, Dict[str, str]] = {
53
  "entry": "main.py",
54
  "description": "Trip Planner - Detailed trip itinerary planning"
55
  },
56
- "chatbot": {
57
- "path": "src/chatbot",
58
  "entry": "app.py",
59
  "description": "General Chatbot - Multi-purpose conversational AI"
60
  },
61
- "accessibility": {
62
- "path": "src/accessibility",
 
 
 
 
 
 
 
 
 
 
63
  "entry": "app.py",
64
  "description": "Accessibility Tools - Assistive technology applications"
65
  },
@@ -82,6 +97,11 @@ APP_REGISTRY: Dict[str, Dict[str, str]] = {
82
  "path": "src/interview-assistant",
83
  "entry": "app.py",
84
  "description": "Interview Assistant - Multi-agent interview tool"
 
 
 
 
 
85
  }
86
  }
87
 
 
33
  "entry": "app.py",
34
  "description": "Healthcare Assistant - Medical information with RAG and web search"
35
  },
36
+ "deep-research_v1": {
37
+ "path": "src/deep-research_v1",
38
+ "entry": "app.py",
39
+ "description": "Deep Research AI - Comprehensive research assistant"
40
+ },
41
+ "deep-research_v2": {
42
+ "path": "src/deep-research_v2",
43
  "entry": "app.py",
44
  "description": "Deep Research AI - Comprehensive research assistant"
45
  },
 
58
  "entry": "main.py",
59
  "description": "Trip Planner - Detailed trip itinerary planning"
60
  },
61
+ "chatbot_v1": {
62
+ "path": "src/chatbot_v1",
63
  "entry": "app.py",
64
  "description": "General Chatbot - Multi-purpose conversational AI"
65
  },
66
+ "chatbot_v2": {
67
+ "path": "src/chatbot_v2",
68
+ "entry": "app.py",
69
+ "description": "Layered Chatbot (ReAct) - Advanced Architecture"
70
+ },
71
+ "accessibility_v2": {
72
+ "path": "src/accessibility_v2",
73
+ "entry": "app.py",
74
+ "description": "Accessibility Auditor V2 - Layered Architecture"
75
+ },
76
+ "accessibility_v1": {
77
+ "path": "src/accessibility_v1",
78
  "entry": "app.py",
79
  "description": "Accessibility Tools - Assistive technology applications"
80
  },
 
97
  "path": "src/interview-assistant",
98
  "entry": "app.py",
99
  "description": "Interview Assistant - Multi-agent interview tool"
100
+ },
101
+ "finadvisor": {
102
+ "path": "src/finadvisor",
103
+ "entry": "app.py",
104
+ "description": "Financial Advisor - Multi-agent financial advisor tool"
105
  }
106
  }
107
 
src/interview-assistant/Dockerfile CHANGED
@@ -1,36 +1,35 @@
1
- FROM python:3.12-slim
2
-
3
- ENV PYTHONUNBUFFERED=1 \
4
- DEBIAN_FRONTEND=noninteractive \
5
- PYTHONPATH=/app/src/interview-assistant:$PYTHONPATH
6
-
7
- WORKDIR /app
8
-
9
- # System deps
10
- RUN apt-get update && apt-get install -y \
11
- git build-essential curl \
12
- && rm -rf /var/lib/apt/lists/*
13
-
14
- # Install uv
15
- RUN curl -LsSf https://astral.sh/uv/install.sh | sh
16
- ENV PATH="/root/.local/bin:$PATH"
17
-
18
- # Copy project metadata
19
- COPY pyproject.toml .
20
- COPY uv.lock .
21
-
22
- # Copy application code
23
- COPY common/ ./common/
24
- COPY src/interview-assistant/ ./src/interview-assistant/
25
-
26
- # Install dependencies using uv, then export and install with pip to system
27
- # We use --no-dev to exclude dev dependencies if any
28
- RUN uv sync --frozen --no-dev && \
29
- uv pip install -e . --system
30
-
31
- # Copy entry point
32
- COPY run.py .
33
-
34
- EXPOSE 7860
35
-
36
- CMD ["python", "run.py", "interview-assistant", "--port", "7860"]
 
1
+ FROM python:3.12-slim
2
+
3
+ ENV PYTHONUNBUFFERED=1 \
4
+ DEBIAN_FRONTEND=noninteractive \
5
+ PYTHONPATH=/app:/app/common:$PYTHONPATH
6
+
7
+ WORKDIR /app
8
+
9
+ # System deps
10
+ RUN apt-get update && apt-get install -y \
11
+ git build-essential curl \
12
+ && rm -rf /var/lib/apt/lists/*
13
+
14
+ # Install uv
15
+ RUN curl -LsSf https://astral.sh/uv/install.sh | sh
16
+ ENV PATH="/root/.local/bin:$PATH"
17
+
18
+ # Copy project metadata
19
+ COPY pyproject.toml .
20
+ COPY uv.lock .
21
+
22
+ # Copy required folders
23
+ COPY common/ ./common/
24
+ COPY src/interview-assistant/ ./src/interview-assistant/
25
+
26
+ # Install dependencies using uv, then export and install with pip to system
27
+ RUN uv sync --frozen --no-dev && \
28
+ uv pip install -e . --system
29
+
30
+ # Copy entry point
31
+ COPY run.py .
32
+
33
+ EXPOSE 7860
34
+
35
+ CMD ["python", "run.py", "interview-assistant", "--port", "7860"]
 
src/interview-assistant/README.md CHANGED
@@ -65,4 +65,4 @@ docker build -t interviewer-assistant -f src/interviewer-assistant/Dockerfile .
65
 
66
  # Run
67
  docker run -p 7860:7860 interviewer-assistant
68
- ```
 
65
 
66
  # Run
67
  docker run -p 7860:7860 interviewer-assistant
68
+ ```
src/interview-assistant/agents/__init__.py ADDED
File without changes
src/interview-assistant/agents/definitions.py ADDED
@@ -0,0 +1,166 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
from autogen_agentchat.agents import AssistantAgent
from autogen_ext.models.openai import OpenAIChatCompletionClient
import os
from rag.db import get_db
from dotenv import load_dotenv
from tools.rag_tools import search_candidate_knowledge_base

# Assuming agents/definitions.py is in src/interview-assistant/agents/, root is 3 levels up
ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
load_dotenv(os.path.join(ROOT_DIR, ".env"))

# Shared model client used by every agent factory in this module.
# model_client = OpenAIChatCompletionClient(model="gpt-4")

# Ollama Cloud. Keep the model name in ONE constant so the debug log and the
# client configuration can never disagree (the old f-string hard-coded the
# name and had no placeholder at all).
MODEL_NAME = "gemma3:4b"
api_key = os.getenv("OLLAMA_API_KEY")
print(f"[DEBUG] Loading Ollama Client on Ollama Cloud. Model: {MODEL_NAME}")

model_client = OpenAIChatCompletionClient(
    model=MODEL_NAME,
    api_key=api_key,
    base_url="https://ollama.com/v1",
    # gemma3 is not an OpenAI model, so capabilities must be declared manually.
    model_info={
        "vision": False,
        "function_calling": True,
        "json_output": True,
        "family": "gemma"
    },
    extra_parameters={"max_tokens": 4096}
)

# --- Tools ---
# Tools moved to src/interview-assistant/tools/rag_tools.py
35
+
36
+ # --- Evaluation Agents ---
37
+
38
def get_jd_summarizer():
    """Build the agent that condenses a Job Description into key requirements."""
    prompt = """You are an expert Talent Acquisition Specialist.
    Your task is to analyze the Job Description and extract the Critical Skills, Required Experience, and Nice-to-Have attributes.
    Output a concise summary bullet point list."""
    agent = AssistantAgent(
        name="JD_Summarizer",
        model_client=model_client,
        system_message=prompt,
    )
    return agent
46
+
47
+ def get_resume_summarizer():
48
+ return AssistantAgent(
49
+ name="Resume_Summarizer",
50
+ model_client=model_client,
51
+ tools=[search_candidate_knowledge_base],
52
+ system_message="""You are a Researcher. Your goal is to find evidence in the candidate's resume that matches the JD requirements.
53
+ Use the `search_candidate_knowledge_base` tool to find skills and experience.
54
+ Summarize what the candidate HAS and what they are MISSING based on the evidence found."""
55
+ )
56
+
57
+ def get_evaluator():
58
+ return AssistantAgent(
59
+ name="Evaluator",
60
+ model_client=model_client,
61
+ system_message="""You are the Lead Evaluator.
62
+ You will receive the Job Description and the Candidate's Resume Context.
63
+
64
+ TASK:
65
+ 1. Compare the Candidate's skills/experience against the JD critical requirements.
66
+ 2. Identify specific Matches and Gaps.
67
+ 3. Assign a Fitness Score (0-10) using the SCORING RUBRIC below.
68
+
69
+ SCORING RUBRIC (BE STRICT & CONSISTENT):
70
+ - 0-3 (Mismatch): Missing >50% of critical skills or wrong domain (e.g., Java dev for Python role).
71
+ - 4-6 (Partial): Has core skills but lacks seniority, specific framework exp, or key leadership traits.
72
+ - 7-8 (Good): Matches ~80% of requirements. Minor gaps (e.g., missing a nice-to-have).
73
+ - 9-10 (Perfect): 100% match + exceeds expectations (e.g., has bonus skills, higher seniority).
74
+
75
+ OUTPUT:
76
+ Produce a draft analysis with:
77
+ - Score (Must align with Rubric)
78
+ - Top 3 Strengths (Matches - Be specific)
79
+ - Top 3 Weaknesses (Gaps - Be specific)
80
+ - Brief Summary
81
+ """
82
+ )
83
+
84
+ def get_coordinator():
85
+ return AssistantAgent(
86
+ name="Coordinator",
87
+ model_client=model_client,
88
+ system_message="""You are the Quality Assurance Manager. Review the Evaluator's output.
89
+
90
+ CRITICAL OUTPUT RULES:
91
+ 1. You must output the Final Evaluation in STRICT JSON format.
92
+ 2. The JSON must have these exact keys:
93
+ - "score": Integer (0-10)
94
+ - "key_matches": List[str] (Specific skills/experiences from JD found in Resume)
95
+ - "gaps": List[str] (Specific requirements missing)
96
+ - "summary": str (Brief reasoning)
97
+
98
+ QUALITY CHECK:
99
+ - Ensure the 'summary' is insightful and not just a restatement.
100
+ - Ensure 'key_matches' and 'gaps' are specific, not generic.
101
+
102
+ DECISION:
103
+ - If satisfied, output the valid JSON immediately followed by "EVALUATION_APPROVED".
104
+ - If NOT satisfied (e.g., vague analysis, missing JSON, unstructured), REJECT.
105
+ - Provide specific instructions to the Evaluator on what to fix.
106
+ - Do NOT output the termination keyword.
107
+ """
108
+ )
109
+
110
+ # --- Interview Design Agents (Flow 3) ---
111
+
112
+ def get_question_generator():
113
+ return AssistantAgent(
114
+ name="Question_Generator",
115
+ model_client=model_client,
116
+ system_message="""You are a Strategy-Driven Senior Interviewer.
117
+
118
+ PHASE 1: STRATEGY
119
+ - Analyze the JD Role & Seniority (Junior vs Senior vs Architect vs Manager).
120
+ - Determine weights:
121
+ * Developer: Focus on Tech.
122
+ * Architect: Focus on System Design/Tech.
123
+ * Manager: Focus on Leadership/Behavioral.
124
+
125
+ PHASE 2: GENERATION
126
+ - Generate exactly 10 Interview Questions based on the strategy.
127
+
128
+ CRITICAL INSTRUCTION:
129
+ - Each Question MUST be clear and descriptive (approx. 50-100 words).
130
+ - DO NOT ask simple one-liners. Use scenario-based questions or multi-part situational problems.
131
+
132
+ ORGANIZATION:
133
+ - Group the questions by Category: Present all Technical questions first, then Leadership, then Behavioral.
134
+
135
+ OUTPUT FORMAT:
136
+ Output the questions as a JSON List of Objects. Each object must have:
137
+ {
138
+ "category": "Technical|Leadership|Behavioral",
139
+ "u_id": int (1-10),
140
+ "question": "The detailed scenario-based question text (50-100 words)",
141
+ "complexity": "Low|Medium|High",
142
+ "likely_answer": "Key points expected in a good answer"
143
+ }
144
+ """
145
+ )
146
+
147
+ def get_question_reviewer():
148
+ return AssistantAgent(
149
+ name="Question_Reviewer",
150
+ model_client=model_client,
151
+ system_message="""You are the Interview Board Chair. Review the generated questions.
152
+
153
+ CHECKLIST:
154
+ 1. Are there exactly 10 questions?
155
+ 2. Do they cover the specific topics identified by the Strategist?
156
+ 3. Are the questions SUFFICIENTLY DETAILED (50-100 words each)?
157
+ 4. Are the 'likely_answer' keys provided and accurate?
158
+ 5. Is the format valid JSON?
159
+
160
+ DECISION:
161
+ - If satisfied (ALL checks pass), output the final JSON list and then write "GUIDE_APPROVED" on a new line.
162
+ - If NOT satisfied (e.g., questions are too short/simple, missing answer keys, wrong count, or not specific enough), REJECT.
163
+ - Provide specific, actionable feedback on what needs to change.
164
+ - Do NOT output the termination keyword.
165
+ """
166
+ )
src/interview-assistant/app.py CHANGED
@@ -1,219 +1,639 @@
1
  import streamlit as st
2
  import os
3
  import sys
4
- import tempfile
5
- import asyncio
6
- import traceback
7
- import extra_streamlit_components as stx
8
  from dotenv import load_dotenv
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
9
 
10
- # Ensure we can import from local modules
11
- current_dir = os.path.dirname(os.path.abspath(__file__))
12
- if current_dir not in sys.path:
13
- sys.path.append(current_dir)
14
-
15
- from teams.team import get_interview_team
16
- from ui.styles import apply_custom_styles
17
- from ui.hero import render_hero
18
- from ui.sidebar import render_sidebar
19
- from ui.report import generate_markdown_report, create_pdf, render_persistent_view
20
- from common.utility.autogen_model_factory import AutoGenModelFactory
21
-
22
- # Load env variables
23
- load_dotenv()
24
-
25
- st.set_page_config(page_title="Interviewer Assistant", page_icon="👔", layout="wide")
26
-
27
- # Cookie Manager (Must be initialized at top level)
28
- def get_manager():
29
- return stx.CookieManager()
30
-
31
- cookie_manager = get_manager()
32
-
33
- # 1. Apply Styles
34
- apply_custom_styles()
35
-
36
- # 2. Render Sidebar
37
- sidebar_data = render_sidebar(cookie_manager)
38
- mode = sidebar_data["mode"]
39
- job_description = sidebar_data["job_description"]
40
- uploaded_resume = sidebar_data["uploaded_resume"]
41
- linkedin_url = sidebar_data["linkedin_url"]
42
-
43
- # 3. Render Hero
44
- render_hero()
45
-
46
- # ------------------------------------------------------------------------------
47
- # LOGIC & ANALYSIS HELPERS
48
- # ------------------------------------------------------------------------------
49
-
50
- async def run_analysis_stream(model_client, task_msg):
51
- team = get_interview_team(model_client)
52
- # Return the stream generator
53
- stream = team.run_stream(task=task_msg)
54
- return stream
55
-
56
- # ------------------------------------------------------------------------------
57
- # MAIN CONTENT LOGIC
58
- # ------------------------------------------------------------------------------
59
-
60
- if mode == "Candidate":
61
- st.markdown("## 🎓 Candidate Prep Portal")
62
- st.info("This feature is under development. It will allow candidates to take mock interviews based on the generated guide.")
63
- st.image("https://cdn-icons-png.flaticon.com/512/3220/3220565.png", width=150)
64
- st.stop()
65
-
66
- if st.session_state.analyzing:
67
- # Validation
68
- if not os.getenv("OPENAI_API_KEY"):
69
- st.error("Missing OpenAI API Key. Please check your .env file or environment variables.")
70
- st.session_state.analyzing = False
71
- st.stop()
72
- if not job_description:
73
- st.error("Missing Job Description.")
74
- st.session_state.analyzing = False
75
- st.stop()
76
- if not uploaded_resume:
77
- st.error("Missing Resume File.")
78
- st.session_state.analyzing = False
79
- st.stop()
80
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
81
  try:
82
- # Process Input
83
- resume_path = ""
84
- with tempfile.NamedTemporaryFile(delete=False, suffix=f".{uploaded_resume.name.split('.')[-1]}") as tmp_file:
85
- tmp_file.write(uploaded_resume.getvalue())
86
- resume_path = tmp_file.name
87
-
88
- resume_content_msg = f"Candidate Resume File Path: {resume_path} (Please use `read_local_file` to read this)."
89
- if linkedin_url:
90
- resume_content_msg += f"\nAlso check LinkedIn: {linkedin_url} (use `scrape_web_page`)."
91
-
92
- # Execution
93
- prog_bar = st.progress(0)
94
- status_text = st.empty()
95
-
96
- # Create Model Client using Factory
97
- model_client = AutoGenModelFactory.get_model(
98
- provider="openai", model_name="gpt-4-turbo", model_info={"vision": False, "function_calling": True, "json_output": False}
99
- )
100
-
101
- task_msg = f"""
102
- Here is the Job Description:
103
- {job_description}
104
 
105
- {resume_content_msg}
 
106
 
107
- The team must follow the strict workflow:
108
- Profiler -> Job Analyst -> Reviewer -> Evaluator -> Designer.
109
- """
 
110
 
111
- status_text.text("Initializing Agents...")
112
- prog_bar.progress(10)
 
 
 
 
 
113
 
114
- # Create a placeholder for debug output to avoid context issues inside async
115
- debug_placeholder = st.empty()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
116
 
117
- # Define debug print helper
118
- def debug_print(msg):
119
- print(f"DEBUG: {msg}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
120
 
121
- debug_print("Analysis Started. Loop initializing...")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
122
 
123
- async def execute_analysis(placeholder):
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
124
  try:
125
- debug_print("Entering execute_analysis async function")
126
- with st.spinner("Analyzing candidate and designing interview..."):
127
- # Get the stream
128
- debug_print(f"Creating team and stream with task length {len(task_msg)}")
129
- stream = await run_analysis_stream(model_client, task_msg)
130
-
131
- messages = []
132
- msg_count = 0
133
- debug_print("Stream created. Iterating...")
134
-
135
- # Stream messages
136
- async for message in stream:
137
- msg_count += 1
138
- messages.append(message)
139
-
140
- source = getattr(message, 'source', 'Unknown')
141
- content = getattr(message, 'content', '')
142
- debug_print(f"Stream Msg {msg_count}: {source}")
 
 
 
 
 
 
 
 
 
 
 
143
 
144
- if isinstance(content, list):
145
- content = "[Multimodal Content]"
146
- elif not content:
147
- content = "[No Content]"
148
-
149
- # Evidence: Show Data (Source Content) in UI & Console
150
- if isinstance(content, str) and len(content) > 500:
151
- print(f"--- EVIDENCE ({source}) ---\n{content[:5000]}\n---------------------------")
152
- with st.expander(f"📄 Data Source Evidence ({source})", expanded=False):
153
- st.text(content)
154
 
155
- # Update Debug UI safely
156
- placeholder.text(f"[{msg_count}] {source}: {str(content)[:150]}...")
 
157
 
158
- # Progress bar update
159
- if source == "Candidate_Profiler": prog_bar.progress(20)
160
- elif source == "Job_Analyst": prog_bar.progress(40)
161
- elif source == "Job_Analyst_Reviewer": prog_bar.progress(60)
162
- elif source == "Evaluator": prog_bar.progress(80)
163
- elif source == "Interview_Designer": prog_bar.progress(95)
164
-
165
- if msg_count == 0:
166
- st.error("No messages received. Check logs/console.")
167
- debug_print("Stream finished with 0 messages.")
168
- else:
169
- debug_print(f"Stream finished with {msg_count} messages.")
170
-
171
- prog_bar.progress(100)
172
- status_text.text("Analysis Complete.")
173
-
174
- # Generation
175
- final_markdown = generate_markdown_report(messages)
176
-
177
- if not final_markdown.strip():
178
- final_markdown = "## Report Generation Failed\nNo structured output was found from the agent team."
179
-
180
- # Save to State (Persistence)
181
- st.session_state.generated_report = final_markdown
182
- st.session_state.generated_pdf = create_pdf(final_markdown)
183
 
184
- except Exception as e:
185
- st.error(f"An error occurred during analysis: {e}")
186
- import traceback
187
- st.text(traceback.format_exc())
188
- debug_print(f"Async Job Failed: {e}")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
189
 
190
- # Run the async execution
191
- try:
192
- import asyncio
193
- loop = asyncio.new_event_loop()
194
- asyncio.set_event_loop(loop)
 
 
 
195
  try:
196
- loop.run_until_complete(execute_analysis(debug_placeholder))
197
- finally:
198
- # Cleanup pending tasks
199
- pending = asyncio.all_tasks(loop)
200
- for task in pending:
201
- task.cancel()
202
- if pending:
203
- loop.run_until_complete(asyncio.gather(*pending, return_exceptions=True))
204
- loop.close()
205
- except Exception as e:
206
- st.error(f"System Error: {e}")
207
- import traceback
208
- st.text(traceback.format_exc())
209
 
210
- finally:
211
- # Reset analysis state at the end so user can run again
212
- st.session_state.analyzing = False
213
- st.rerun()
214
-
215
- else:
216
- st.info("👈 Please fill in the details in the sidebar to get started.")
217
 
218
- # 4. Render Persistent View (Report)
219
- render_persistent_view()
 
1
  import streamlit as st
2
  import os
3
  import sys
4
+ import sqlite3
5
+ import shutil
6
+ import pandas as pd
 
7
  from dotenv import load_dotenv
8
+ import asyncio
9
+
10
+ # --- Imports ---
11
+ # Ensure local modules can be imported
12
+ # Use insert(0) to prioritize local 'agents' folder over installed 'agents' package
13
+ sys.path.insert(0, os.path.dirname(os.path.abspath(__file__)))
14
+
15
+ from rag.ingest import process_uploaded_files
16
+ from rag.db import get_db
17
+ # from agents.manager import get_agent_manager # Deprecated
18
+ from teams.evaluation_team import run_evaluation_team
19
+ import json
20
+ import re
21
+
22
+ # Load environment variables
23
+ # Assuming app.py is in src/interview-assistant/, root is 2 levels up
24
+ ROOT_DIR = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
25
+ load_dotenv(os.path.join(ROOT_DIR, ".env"))
26
 
27
+ # Set page config
28
+ st.set_page_config(page_title="Agentic Interview Assistant", layout="wide", initial_sidebar_state="collapsed")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
29
 
30
+ # Custom CSS for improvements
31
+ st.markdown("""
32
+ <style>
33
+ .stApp {
34
+ background-color: #f8f9fa;
35
+ }
36
+ .block-container {
37
+ padding-top: 2rem !important;
38
+ padding-bottom: 1rem !important;
39
+ }
40
+ .main-header {
41
+ font-size: 2.0rem;
42
+ color: #1E3A8A;
43
+ font-weight: 700;
44
+ margin-bottom: 0px;
45
+ }
46
+ .card {
47
+ background-color: white;
48
+ padding: 1rem;
49
+ border-radius: 10px;
50
+ box-shadow: 0 4px 6px rgba(0,0,0,0.1);
51
+ margin-bottom: 0.5rem;
52
+ }
53
+ /* Compact the grid */
54
+ div[data-testid="column"] {
55
+ padding: 0 !important;
56
+ }
57
+ p {
58
+ margin-bottom: 0.2rem;
59
+ }
60
+ </style>
61
+ """, unsafe_allow_html=True)
62
+
63
+ # Define Base Directory for persistent storage
64
+ BASE_DIR = os.path.dirname(os.path.abspath(__file__))
65
+ # Ensure data directory exists
66
+ DATA_DIR = os.path.join(BASE_DIR, "data")
67
+ if not os.path.exists(DATA_DIR):
68
+ os.makedirs(DATA_DIR)
69
+
70
+ DB_PATH = os.path.join(DATA_DIR, "interview_state.db")
71
+ RAG_DIR = os.path.join(DATA_DIR, "interview_rag_db")
72
+
73
+ # --- Database Helper Functions ---
74
+
75
def init_db(db_path=None):
    """Create the SQLite schema (job_context, candidates) if it does not exist.

    Safe to call repeatedly: tables use IF NOT EXISTS and the 'questions'
    column migration tolerates already-migrated databases.

    Args:
        db_path: Optional path to the SQLite file. Defaults to the
            module-level DB_PATH so existing callers keep working.
    """
    conn = sqlite3.connect(DB_PATH if db_path is None else db_path)
    try:
        c = conn.cursor()
        # Init tables
        c.execute('''CREATE TABLE IF NOT EXISTS job_context (
                        id INTEGER PRIMARY KEY,
                        description TEXT
                    )''')
        c.execute('''CREATE TABLE IF NOT EXISTS candidates (
                        id INTEGER PRIMARY KEY AUTOINCREMENT,
                        name TEXT,
                        filename TEXT,
                        status TEXT,
                        score INTEGER,
                        strengths TEXT,
                        weaknesses TEXT,
                        questions TEXT
                    )''')

        # Safe migration for dbs created before the 'questions' column existed;
        # ALTER raises OperationalError when the column is already present.
        try:
            c.execute("ALTER TABLE candidates ADD COLUMN questions TEXT")
        except sqlite3.OperationalError:
            pass

        conn.commit()
    finally:
        # Close even if a DDL statement raises, so the handle never leaks.
        conn.close()
102
 
103
def load_state():
    """Hydrate Streamlit session state ('jd', 'candidates') from the SQLite store.

    Does nothing when the database file has not been created yet; read errors
    are printed rather than raised so a corrupt store cannot crash the app.
    """
    if not os.path.exists(DB_PATH):
        return

    conn = sqlite3.connect(DB_PATH)
    try:
        # Job description: at most one row is ever stored.
        jd_frame = pd.read_sql_query("SELECT description FROM job_context LIMIT 1", conn)
        if not jd_frame.empty:
            st.session_state['jd'] = jd_frame.iloc[0]['description']

        # Candidate roster, as a list of plain dicts.
        roster = pd.read_sql_query("SELECT * FROM candidates", conn)
        if not roster.empty:
            st.session_state['candidates'] = roster.to_dict('records')
    except Exception as e:
        print(f"Error loading state: {e}")
    finally:
        conn.close()
124
+
125
def save_new_session(job_desc, processed_docs):
    """Saves initial session data to SQLite based on processed RAG docs"""
    conn = sqlite3.connect(DB_PATH)
    cur = conn.cursor()

    # Replace any previous Job Description with the new one.
    cur.execute("DELETE FROM job_context")
    cur.execute("INSERT INTO job_context (description) VALUES (?)", (job_desc,))

    # Rebuild the candidate table from scratch.
    cur.execute("DELETE FROM candidates")

    # One entry per candidate name; a resume split into several RAG chunks
    # still yields a single candidate row (first chunk wins).
    unique_candidates = {}
    for doc in processed_docs:
        meta = doc['metadata']
        name = meta['candidate_name']
        if name in unique_candidates:
            continue
        unique_candidates[name] = {
            'name': name,
            'filename': meta['filename'],
            'status': 'Pending',
            'score': 0,
            'strengths': '-',
            'weaknesses': '-',
            'questions': None
        }

    candidates_list = []
    for record in unique_candidates.values():
        cur.execute("""INSERT INTO candidates (name, filename, status, score, strengths, weaknesses, questions)
                     VALUES (?, ?, ?, ?, ?, ?, ?)""",
                  (record['name'], record['filename'], record['status'], record['score'], record['strengths'], record['weaknesses'], record['questions']))
        candidates_list.append(record)

    conn.commit()
    conn.close()
    return candidates_list
163
+
164
def update_candidate_record(name, status, score, strengths, weaknesses):
    """Persist one candidate's evaluation outcome, keyed by candidate name."""
    conn = sqlite3.connect(DB_PATH)
    cur = conn.cursor()
    cur.execute("""UPDATE candidates
                 SET status = ?, score = ?, strengths = ?, weaknesses = ?
                 WHERE name = ?""",
              (status, score, strengths, weaknesses, name))
    conn.commit()
    conn.close()
173
+
174
def update_candidate_questions(name, questions_json_str):
    """Store the interview-guide JSON (as a string) for the named candidate."""
    conn = sqlite3.connect(DB_PATH)
    conn.execute("UPDATE candidates SET questions = ? WHERE name = ?", (questions_json_str, name))
    conn.commit()
    conn.close()
180
+
181
def update_job_description(new_jd):
    """Replace the stored JD and reset every candidate back to 'Pending'.

    Evaluations are JD-relative, so changing the JD invalidates them.
    """
    conn = sqlite3.connect(DB_PATH)
    cur = conn.cursor()
    # Update JD (single-row table: delete then insert).
    cur.execute("DELETE FROM job_context")
    cur.execute("INSERT INTO job_context (description) VALUES (?)", (new_jd,))

    # Reset candidates
    cur.execute("UPDATE candidates SET status = 'Pending', score = 0, strengths = '-', weaknesses = '-'")
    conn.commit()
    conn.close()

    # Mirror the reset into session state so the UI reflects it immediately.
    st.session_state['jd'] = new_jd
    for cand in st.session_state['candidates']:
        cand['status'] = 'Pending'
        cand['score'] = 0
        cand['strengths'] = '-'
        cand['weaknesses'] = '-'
200
+
201
+ # Removed st.experimental_dialog to ensure compatibility
202
+ # JD Editor logic moved to render_dashboard
203
+
204
def reset_system():
    """Wipes SQLite and RAG DB, then restarts the Streamlit session."""
    # Remove the relational state file. Catch only filesystem errors instead
    # of a bare except so genuine bugs (e.g. NameError) are not hidden.
    if os.path.exists(DB_PATH):
        try:
            os.remove(DB_PATH)
        except OSError as e:
            print(f"SQLite reset info: {e}")

    # Reset ChromaDB (drops and recreates the candidates collection).
    try:
        get_db().reset()
    except Exception as e:
        # If DB file doesn't exist yet, that's fine
        print(f"RAG Reset info: {e}")

    # Drop all session keys (jd, candidates, view flags) and rerun the script
    # so the user lands back on the upload screen.
    st.session_state.clear()
    st.rerun()
221
+
222
+ # --- App Logic ---
223
+
224
# Initialize once per browser session: create/migrate the schema, seed
# in-memory defaults, then hydrate from any previously persisted run.
if 'init_done' not in st.session_state:
    init_db()
    st.session_state['init_done'] = True
    st.session_state['candidates'] = []
    st.session_state['jd'] = ""
    load_state()  # Load from DB if exists (overwrites the defaults above)

# Navigation state: 'dashboard' (candidate grid) or 'studio' (per-candidate
# interview-question designer).
if 'current_view' not in st.session_state:
    st.session_state['current_view'] = 'dashboard'
if 'selected_candidate_for_studio' not in st.session_state:
    st.session_state['selected_candidate_for_studio'] = None
236
+
237
def main():
    """Top-level page: header, first-run upload flow, session summary, and view routing."""
    # Header row: title on the left, global reset button on the right.
    c1, c2 = st.columns([5, 1])
    with c1:
        st.markdown('<div class="main-header">🤖 Agentic Interview Assistant</div>', unsafe_allow_html=True)
        st.markdown("Your AI-powered partner for talent acquisition.")

    with c2:
        if st.button("🔄 New Interview", type="secondary", help="Reset all data and start over"):
            reset_system()

    st.markdown("---")

    # A session is "active" once both candidates and a JD are loaded
    # (either from this run or restored from SQLite on startup).
    has_active_session = bool(st.session_state['candidates']) and bool(st.session_state['jd'])

    # --- Flow 1: Upload & Store (Top Section) ---
    if not has_active_session:
        with st.container():
            st.info("👋 Welcome! Please upload resumes and provide a Job Description to get started.")

            c1, c2 = st.columns([1, 1], gap="small")
            with c1:
                st.markdown("### 1. Candidate Resumes")
                uploaded_files = st.file_uploader("Upload PDF files", type=["pdf"], accept_multiple_files=True)

            with c2:
                st.markdown("### 2. Job Description")
                jd_input = st.text_area("Paste the JD here...", height=150, placeholder="We are looking for a Senior Python Engineer...")

            if st.button("🚀 Start Analysis", type="primary", use_container_width=True):
                if uploaded_files and jd_input:
                    with st.spinner("Ingesting resumes & building Knowledge Base..."):
                        try:
                            # 1. Process PDFs into {'text', 'metadata'} docs
                            processed_docs = process_uploaded_files(uploaded_files)

                            if not processed_docs:
                                st.error("No text could be extracted from these PDFs.")
                                st.stop()

                            # 2. Add to RAG (Chroma vector store)
                            get_db().add_documents(processed_docs)

                            # 3. Save Session Metadata to SQLite (dedupes by name)
                            st.session_state['candidates'] = save_new_session(jd_input, processed_docs)
                            st.session_state['jd'] = jd_input

                            st.success(f"Successfully processed {len(st.session_state['candidates'])} candidates.")
                            st.rerun()
                        except Exception as e:
                            st.error(f"An error occurred: {e}")
                else:
                    st.warning("Please upload files and enter a job description.")
    else:
        # Show mini stats: totals plus average score over evaluated candidates.
        total = len(st.session_state['candidates'])
        evaluated = len([c for c in st.session_state['candidates'] if c['status'] == 'Evaluated'])
        avg_score = 0
        if evaluated > 0:
            scores = []
            for c in st.session_state['candidates']:
                if c['status'] == 'Evaluated':
                    # Scores come back from SQLite/agents as mixed types;
                    # unparseable values count as 0 rather than crashing.
                    try:
                        scores.append(float(c['score']))
                    except (ValueError, TypeError):
                        scores.append(0)

            avg_score = sum(scores) / len(scores) if scores else 0

        # Compact Summary Row (inline HTML for a dense metric strip)
        col_metrics, col_edit = st.columns([6, 1])
        with col_metrics:
            st.markdown(f"""
            <div style="display: flex; gap: 24px; align-items: center; padding: 10px 15px; background: white; border-radius: 8px; box-shadow: 0 1px 3px rgba(0,0,0,0.1); margin-bottom: 5px; font-size: 0.9rem;">
                <div>📊 <strong>Candidates:</strong> {total}</div>
                <div style="border-left: 1px solid #eee; height: 18px;"></div>
                <div>✅ <strong>Evaluated:</strong> {evaluated}</div>
                <div style="border-left: 1px solid #eee; height: 18px;"></div>
                <div>⭐ <strong>Avg Score:</strong> {avg_score:.1f}</div>
            </div>
            """, unsafe_allow_html=True)

        with col_edit:
            # Toggle the inline JD editor below.
            if st.button("📝 Edit JD", help="View or Update Job Description"):
                st.session_state['show_jd_edit'] = not st.session_state.get('show_jd_edit', False)

        # Inline Editor for maximum compatibility (avoids st.experimental_dialog)
        if st.session_state.get('show_jd_edit', False):
            with st.container():
                st.markdown("### Update Job Description")
                st.warning("⚠️ Saving a new JD will reset all candidate evaluations.")

                new_jd = st.text_area("Job Description", value=st.session_state['jd'], height=200, key="jd_editor_area")

                ec1, ec2 = st.columns([1, 5])
                with ec1:
                    if st.button("💾 Save & Reset", type="primary"):
                        update_job_description(new_jd)
                        st.session_state['show_jd_edit'] = False
                        st.rerun()
                with ec2:
                    if st.button("Cancel"):
                        st.session_state['show_jd_edit'] = False
                        st.rerun()
                st.markdown("---")

    # --- Flow Switcher: route to dashboard grid or the interview studio ---
    if has_active_session:
        if st.session_state['current_view'] == 'dashboard':
            render_dashboard()
        else:
            render_studio()
352
+
353
# NOTE(review): dead code — this stub is shadowed by the full
# `def render_dashboard()` defined later in this file (the later definition
# wins at import time). Safe to delete in a follow-up.
def render_dashboard():
    st.subheader("📊 Candidate Dashboard")

    # ... (Previous code)
357
 
358
+ # Helper function for evaluation (to avoid code duplication)
359
# Helper function for evaluation (shared by "Evaluate" and "Re-evaluate").
async def perform_evaluation(candidate_index, candidate_name, jd_text):
    """Run the multi-agent evaluation team, parse its JSON verdict, and persist it."""
    raw_result = await run_evaluation_team(candidate_name, jd_text)

    # Pull the first {...} object out of the agent transcript; anything
    # unparseable degrades to an empty result rather than crashing.
    result = {}
    json_match = re.search(r'\{.*\}', raw_result, re.DOTALL)
    if json_match:
        try:
            result = json.loads(json_match.group(0))
        except json.JSONDecodeError:
            result = {}

    # Map new keys (key_matches/gaps) to the DB schema (strengths/weaknesses),
    # falling back to the old keys if the agent reverts its output format.
    score = result.get('score', 0)
    strengths = result.get('key_matches', result.get('strengths', []))
    weaknesses = result.get('gaps', result.get('weaknesses', []))

    # Normalize scalar strings into single-item lists.
    if isinstance(strengths, str):
        strengths = [strengths]
    if isinstance(weaknesses, str):
        weaknesses = [weaknesses]

    strengths_str = "\n".join([f"- {s}" for s in strengths]) if strengths else "- None identified"
    weaknesses_str = "\n".join([f"- {w}" for w in weaknesses]) if weaknesses else "- None identified"

    # Persist to SQLite first, then mirror into session state so the grid
    # reflects the new verdict on the next rerun.
    update_candidate_record(candidate_name, 'Evaluated', score, strengths_str, weaknesses_str)

    entry = st.session_state['candidates'][candidate_index]
    entry['status'] = 'Evaluated'
    entry['score'] = score
    entry['strengths'] = strengths_str
    entry['weaknesses'] = weaknesses_str
399
+
400
def render_dashboard():
    """Candidate grid: status, score, per-row Evaluate/Design actions, and an
    inline evaluation-report panel."""
    st.subheader("📊 Candidate Dashboard")

    # Header Row (HTML grid so the widths line up with the st.columns below)
    st.markdown("""
    <div style="display: grid; grid-template-columns: 2fr 1fr 2fr 1fr 1fr; gap: 10px; font-weight: bold; margin-bottom: 10px;">
        <div>Candidate Name</div>
        <div>Status</div>
        <div>Fitness Score</div>
        <div>Analysis</div>
        <div>Interview</div>
    </div>
    """, unsafe_allow_html=True)

    # Report "popup": simulated with a container rendered above the grid
    # whenever view_report_id points at a candidate index.
    if st.session_state.get('view_report_id') is not None:
        idx = st.session_state['view_report_id']
        # Safety check: the candidate list may have shrunk since the id was set.
        if idx < len(st.session_state['candidates']):
            cand = st.session_state['candidates'][idx]

            with st.container():
                st.markdown(f"### 📑 Evaluation Report: {cand['name']}")
                rc1, rc2, rc3 = st.columns([1, 1, 1])
                rc1.metric("Score", f"{cand['score']}/10")

                with rc2:
                    st.markdown("**✅ Key Matches**")
                    st.markdown(cand.get('strengths', '-'))

                with rc3:
                    st.markdown("**⚠️ Gaps**")
                    st.markdown(cand.get('weaknesses', '-'))

                # Actions: re-run the agent team, or dismiss the panel.
                ac1, ac2 = st.columns([1, 5])
                with ac1:
                    if st.button("🔄 Re-evaluate", key=f"re_eval_{idx}"):
                        with st.spinner("Re-evaluating..."):
                            asyncio.run(perform_evaluation(idx, cand['name'], st.session_state['jd']))
                        st.rerun()
                with ac2:
                    if st.button("Close Report"):
                        st.session_state['view_report_id'] = None
                        st.rerun()
                st.divider()

    # One row per candidate, mirroring the header grid above.
    for i, candidate in enumerate(st.session_state['candidates']):
        with st.container():
            c1, c2, c3, c4, c5 = st.columns([2, 1, 2, 1, 1])

            # Name rendered as a button acting as a link to the report panel.
            if c1.button(f"📄 {candidate['name']}", key=f"view_{i}", help="Click to view full evaluation details"):
                st.session_state['view_report_id'] = i
                st.rerun()

            # Status badge (green once evaluated).
            status_color = "green" if candidate['status'] == 'Evaluated' else "gray"
            c2.markdown(f":{status_color}[{candidate['status']}]")

            # Score: stored values may be non-numeric; treat those as 0.
            try:
                score_val = float(candidate['score'])
            except (ValueError, TypeError):
                score_val = 0

            if score_val > 0:
                c3.progress(score_val / 10, text=f"{score_val}/10")
            else:
                c3.markdown("Waiting...")

            # Evaluate button — disabled once the candidate is already evaluated.
            if c4.button("⚡ Evaluate", key=f"eval_{i}", disabled=candidate['status'] == 'Evaluated', use_container_width=True):
                with st.spinner(f"Evaluating {candidate['name']} with Multi-Agent Team..."):
                    try:
                        asyncio.run(perform_evaluation(i, candidate['name'], st.session_state['jd']))
                        st.rerun()
                    except Exception as e:
                        st.error(f"Failed: {e}")

            # Design button — only enabled after evaluation; opens the studio view.
            if c5.button("🎙️ Design", key=f"design_{i}", disabled=candidate['status'] != 'Evaluated', use_container_width=True):
                st.session_state['selected_candidate_for_studio'] = candidate
                st.session_state['current_view'] = 'studio'
                st.rerun()

        # Compact separator between rows.
        st.markdown("<hr style='margin: 2px 0; border: none; border-top: 1px solid #e0e0e0;' />", unsafe_allow_html=True)
488
+
489
+ from teams.interview_team import run_interview_generation_team, run_interview_revision
490
+
491
# NOTE(review): dead code — this stub is shadowed by the full
# `def render_studio()` defined immediately below (the later definition
# wins at import time). Safe to delete in a follow-up.
def render_studio():
    candidate = st.session_state['selected_candidate_for_studio']
    if not candidate:
        st.session_state['current_view'] = 'dashboard'
        st.rerun()
496
+
497
+ from pdf_utils import create_interview_guide_pdf
498
+
499
def render_studio():
    """Per-candidate Interview Studio: generate, revise, and export a question guide.

    Questions are cached in session state under ``questions_<name>`` and
    persisted as a JSON string in the SQLite ``questions`` column.
    """
    candidate = st.session_state['selected_candidate_for_studio']
    if not candidate:
        # Defensive: stale navigation state — bounce back to the dashboard.
        st.session_state['current_view'] = 'dashboard'
        st.rerun()

    # Session Persistence & Loading (keyed per candidate name).
    cand_id = candidate['name']
    state_key = f"questions_{cand_id}"

    # Hydrate from the persisted JSON column when session state is cold.
    if state_key not in st.session_state:
        stored_q = candidate.get('questions')
        if stored_q:
            try:
                st.session_state[state_key] = json.loads(stored_q)
            # Narrowed from a bare `except:` (which also swallowed
            # SystemExit/KeyboardInterrupt): only parse failures should
            # degrade to the "not generated yet" state.
            except (json.JSONDecodeError, TypeError):
                st.session_state[state_key] = None
        else:
            st.session_state[state_key] = None

    c_back, c_title = st.columns([1, 6])
    with c_back:
        if st.button(" Back"):
            st.session_state['current_view'] = 'dashboard'
            st.session_state['selected_candidate_for_studio'] = None
            st.rerun()
    with c_title:
        st.subheader(f"Interview Studio: {candidate['name']}")

    # Main Studio Layout
    if st.session_state[state_key] is None:
        # Pre-generation View
        st.info("Click below to generate a tailored interview guide based on the candidate's profile and the Job Description.")
        if st.button("🚀 Generate Interview Guide", type="primary"):
            with st.spinner("Team working: Strategist setting weights -> Generator creating questions -> Reviewer validating..."):
                try:
                    raw_result = asyncio.run(run_interview_generation_team(candidate['name'], st.session_state['jd']))

                    # Extract the JSON array from the agent transcript.
                    json_match = re.search(r'\[.*\]', raw_result, re.DOTALL)
                    if json_match:
                        questions_json = json_match.group(0)
                        questions = json.loads(questions_json)

                        # Save to Session
                        st.session_state[state_key] = questions

                        # Save to DB (and keep the in-memory dict in sync)
                        update_candidate_questions(candidate['name'], questions_json)
                        candidate['questions'] = questions_json

                        st.success("Guide Generated & Saved!")
                        st.rerun()
                    else:
                        st.error("Failed to parse agent output. See logs.")
                        st.text(raw_result)
                except Exception as e:
                    st.error(f"Generation Failed: {e}")
    else:
        # Post-generation View
        questions = st.session_state[state_key]

        c_left, c_right = st.columns([2, 1])

        with c_left:
            st.markdown("### 📝 Interview Guide")

            # Build one markdown document from the question dicts.
            md_content = ""
            for q in questions:
                uid = q.get('u_id', '?')
                cat = q.get('category', 'General')
                level = q.get('complexity', '')
                text = q.get('question', '')
                ans = q.get('likely_answer', '')

                md_content += f"#### Q{uid}. {text} \n"
                md_content += f"*Category: {cat} | Level: {level}* \n"
                md_content += f"> **Likely Answer**: {ans}\n\n"
                md_content += "---\n"

            st.markdown(md_content)

            st.divider()
            st.markdown("### 💬 Revise Questions")

            # Revision Chat: free-text instruction sent to the revision team.
            if prompt := st.chat_input("Ex: Make the technical questions harder..."):
                with st.spinner("Agents are revising the guide..."):
                    try:
                        current_json = json.dumps(questions)
                        raw_result = asyncio.run(run_interview_revision(current_json, prompt))

                        json_match = re.search(r'\[.*\]', raw_result, re.DOTALL)
                        if json_match:
                            new_q_json = json_match.group(0)
                            new_questions = json.loads(new_q_json)

                            # Update State & DB
                            st.session_state[state_key] = new_questions
                            update_candidate_questions(candidate['name'], new_q_json)
                            candidate['questions'] = new_q_json

                            st.success("Questions Revised!")
                            st.rerun()
                        else:
                            st.error("Revision failed to output JSON.")
                    except Exception as e:
                        st.error(f"Revision Error: {e}")

        with c_right:
            st.markdown("### ⚙️ Actions")

            if st.button("🔄 Regenerate from Scratch"):
                # Clearing the cache flips the view back to pre-generation.
                st.session_state[state_key] = None
                st.rerun()

            # PDF Generation (FPDF may reject exotic characters; surface it)
            try:
                pdf_bytes = create_interview_guide_pdf(candidate['name'], questions)
                st.download_button(
                    label="📥 Download PDF",
                    data=pdf_bytes,
                    file_name=f"{candidate['name'].replace(' ', '_')}_Interview_Guide.pdf",
                    mime="application/pdf",
                    type="primary"
                )
            except Exception as e:
                st.error(f"PDF Generation Error: {e}")

            # Keep JSON as backup/debug
            st.download_button(
                label="Download JSON Source",
                data=json.dumps(questions, indent=2),
                file_name=f"{candidate['name']}_guide.json",
                mime="application/json"
            )
637
 
638
# Script entry point. Streamlit re-executes the whole module on every
# interaction, so main() runs on each rerun.
if __name__ == "__main__":
    main()
src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/data_level0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea9ac6972cb4666769a17755f17c5727f676f11a742e9553bf3a21119ab54394
3
+ size 167600
src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/header.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0e81c3b22454233bc12d0762f06dcca48261a75231cf87c79b75e69a6c00150
3
+ size 100
src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/length.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a12e561363385e9dfeeab326368731c030ed4b374e7f5897ac819159d2884c5
3
+ size 400
src/interview-assistant/data/interview_rag_db/0f85dba8-1000-48f9-8919-0d1580c4761c/link_lists.bin ADDED
File without changes
src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/data_level0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea9ac6972cb4666769a17755f17c5727f676f11a742e9553bf3a21119ab54394
3
+ size 167600
src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/header.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0e81c3b22454233bc12d0762f06dcca48261a75231cf87c79b75e69a6c00150
3
+ size 100
src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/length.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ee8e64450c6462a2a72155da1d5b83595487151bb0c4635c6aa2df0614a38c3f
3
+ size 400
src/interview-assistant/data/interview_rag_db/501d9e40-4881-42db-91c5-b97be285c8f8/link_lists.bin ADDED
File without changes
src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/data_level0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea9ac6972cb4666769a17755f17c5727f676f11a742e9553bf3a21119ab54394
3
+ size 167600
src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/header.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0e81c3b22454233bc12d0762f06dcca48261a75231cf87c79b75e69a6c00150
3
+ size 100
src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/length.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7a12e561363385e9dfeeab326368731c030ed4b374e7f5897ac819159d2884c5
3
+ size 400
src/interview-assistant/data/interview_rag_db/72b4b7ac-2c9d-43e6-84dd-0f3aed61b719/link_lists.bin ADDED
File without changes
src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/data_level0.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:ea9ac6972cb4666769a17755f17c5727f676f11a742e9553bf3a21119ab54394
3
+ size 167600
src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/header.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:a0e81c3b22454233bc12d0762f06dcca48261a75231cf87c79b75e69a6c00150
3
+ size 100
src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/length.bin ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:7ea54909aafe13191f08136f421a3a658757333a33663acc763452c608287439
3
+ size 400
src/interview-assistant/data/interview_rag_db/cd9cc320-056d-410f-ae58-46d063488a30/link_lists.bin ADDED
File without changes
src/interview-assistant/data/interview_rag_db/chroma.sqlite3 ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:351eec403e53e7aa7ca98f5f3107cd88e8bd82eacf1c7919d1d00a5a1fe4e759
3
+ size 495616
src/interview-assistant/data/interview_state.db ADDED
Binary file (16.4 kB). View file
 
src/interview-assistant/implementation.md ADDED
@@ -0,0 +1,115 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Interview Assistant Implementation Plan
2
+
3
+ ## 1. Overview
4
+ The Interview Assistant is a RAG-powered multi-agent system designed to streamline the recruitment process. It allows interviewers to upload candidate resumes, analyze them against specific job descriptions, and generate tailored interview questions.
5
+
6
+ We will use **`autogen-agentchat`** to orchestrate the multi-agent workflow for resume analysis, fitness evaluation, and interview design.
7
+
8
+ ## 2. Architecture
9
+
10
+ ### High-Level Components
11
+ 1. **Streamlit App**: Serves as both the UI and the control layer.
12
+ 2. **RAG Engine (ChromaDB)**: Stores vector embeddings of candidate resumes.
13
+ 3. **Session DB (SQLite)**: Stores structured application state.
14
+ 4. **Agentic Core**: `autogen-agentchat` agents:
15
+ * **Evaluation Team**: `JD_Summarizer`, `Resume_Summarizer`, `Evaluator`, `Coordinator`.
16
+ * **Interview Team**: `Interview_Strategist`, `Question_Generator`, `Question_Reviewer`.
17
+ * **Tools**: `search_candidate_knowledge_base`.
18
+
19
+ ### Persistence & Reset
20
+ * The app checks **SQLite** on startup. If a session exists, it loads the Dashboard view immediately.
21
+ * **"New Interview" Button**: Clears both ChromaDB collections and the SQLite tables to start fresh.
22
+
23
+ ### Agent Workflow
24
+ * **UserProxy**: Configured to allow human input if needed, but primarily acts as the bridge for the Streamlit app.
25
+ * **ResumeAnalyst**: Queries the RAG system to evaluate candidates against the Job Description.
26
+ * **Interviewer**: Generates specific questions based on the Analyst's report.
27
+
28
+ ## 3. Technology Stack
29
+ * **UI & Server**: Streamlit (Python).
30
+ * **AI/ML**:
31
+ * **Framework**: `autogen-agentchat` (Microsoft AutoGen).
32
+ * **Vector DB**: ChromaDB (Local persistent).
33
+ * **Orchestration**: Agents will access ChromaDB via registered tools (`register_function`).
34
+ * **Embeddings**: OpenAI or compatible (SentenceTransformers).
35
+ * **LLM**: Ollama (Gemma 3 4B) hosted on Ollama Cloud.
36
+
37
+ ## 4. Directory Structure
38
+ ```text
39
+ src/interview-assistant/
40
+ ├── app.py # Streamlit Entry point (Main UI)
41
+ ├── agents/ # Agent Definitions
42
+ │ ├── __init__.py
43
+ │ └── definitions.py # JD_Summarizer, Resume_Summarizer, Evaluator, Coordinator
44
+ ├── teams/ # Team Orchestration
45
+ │ ├── __init__.py
46
+ │ └── evaluation_team.py # Evaluation Workflow (RoundRobin)
47
+ ├── data/ # Persistent Storage (SQLite + ChromaDB)
48
+ ├── rag/ # Retrieval Augmented Generation
49
+ │ ├── __init__.py
50
+ │ ├── db.py # ChromaDB Interface
51
+ │ └── ingest.py # PDF/Text Parsing & Chunking
52
+ └── implementation.md # This file
53
+ ```
54
+
55
+ ## 5. User Flows & Implementation Strategy
56
+
57
+ ### Flow 1: Ingestion & Dashboard (✅ Completed)
58
+ **Objective**: Upload data and prepare the workspace.
59
+ * **Input**:
60
+ * File Uploader (Multiple PDFs).
61
+ * Job Description (Text Area).
62
+ * "Submit" Button.
63
+ * **System Action**:
64
+ * Parse PDFs and ingest into **ChromaDB** with metadata (`name`, `filename`).
65
+ * Store JD in session state.
66
+ * Initialize the **Candidate Grid**.
67
+ * **Output**: A data grid displaying discovered candidates. Columns: `[Select]`, `Name`, `Status` (Pending/Evaluated), `Score`, `Actions`.
68
+
69
+ ### Flow 2: On-Demand Evaluation (✅ Completed)
70
+ **Objective**: Multi-Agent analysis of specific candidates.
71
+ * **Input**: User clicks "Evaluate" on a specific candidate row.
72
+ * **System Action**:
73
+ * **Evaluation Team** (`RoundRobinGroupChat`) is triggered in `src/interview-assistant/teams/evaluation_team.py`.
74
+ * **JD_Summarizer**: Extracts key criteria.
75
+ * **Resume_Summarizer**: Fetches resume evidence from RAG.
76
+ * **Evaluator**: Scores and analyzes.
77
+ * **Coordinator**: Validates JSON output.
78
+ * **Output**: Updates the grid data with `Fitness Score`, `Strengths`, `Weaknesses` in SQLite.
79
+ * **Output**: The grid refreshes to show the new data with progress bars.
80
+
81
+ ### Flow 3: Interview Design Studio (✅ Completed)
82
+ **Objective**: Interactive question generation and revision.
83
+ * **Trigger**: User selects a candidate (who is evaluated) and enters the "Interview Studio".
84
+ * **Interface**:
85
+ * "Generate Interview Guide" Button (One-click generation).
86
+ * Chat Interface for **Revision** (e.g., "Make technical questions harder").
87
+ * **System Action**:
88
+ * **Interview Generation Team** (`teams/interview_team.py`):
89
+ * **Interview_Strategist**: Sets weights (Tech/Leadership/Behavioral) based on candidate profile.
90
+ * **Question_Generator**: Creates 20 questions.
91
+ * **Question_Reviewer**: Validates output.
92
+ * **Revision Flow**: Updates existing questions based on user feedback.
93
+ * **Output**:
94
+ * Interactive Expandable List of Questions.
95
+ * JSON Download.
96
+ * Persistence in SQLite for session reloading.
97
+
98
+ ## 6. Implementation Steps
99
+
100
+ ### Phase 1: Core & RAG (✅ Done)
101
+ 1. **Frontend**: Build the layout in `app.py`.
102
+ 2. **RAG**: Implement `ingest_resumes(files)` to populate ChromaDB.
103
+ 3. **Persistence**: SQLite database for session state.
104
+
105
+ ### Phase 2: Evaluation Agent (✅ Done)
106
+ 1. Define agents in `agents/definitions.py`.
107
+ 2. Implement `run_evaluation_team` in `teams/evaluation_team.py`.
108
+ 3. Connect "Evaluate" button in UI to the team runner.
109
+
110
+ ### Phase 3: Interview Chat & PDF (✅ Done)
111
+ 1. **Interview Agents**: Created `Interview_Strategist`, `Question_Generator`, `Question_Reviewer` in `agents/definitions.py`.
112
+ 2. **Team Helper**: Created `teams/interview_team.py` with `run_interview_generation_team` and `run_interview_revision`.
113
+ 3. **App Integration**: Integrated Generation and Revision flows in `render_studio` in `app.py`.
114
+ 4. **Persistence**: Added `questions` column to SQLite schema for saving state.
115
+ 5. **PDF Generation**: (✅ Done: `pdf_utils.py` renders the guide with FPDF; the studio offers both PDF and JSON downloads).
src/interview-assistant/pdf_utils.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from fpdf import FPDF
2
+ import io
3
+
4
class InterviewGuidePDF(FPDF):
    """FPDF subclass that stamps a centered title header and a page-number
    footer on every page (both hooks are invoked automatically by FPDF)."""

    def header(self):
        # Called by FPDF at the top of each page.
        self.set_font('Arial', 'B', 15)
        self.cell(0, 10, 'Interview Guide', 0, 1, 'C')
        self.ln(5)

    def footer(self):
        # Position 15mm above the bottom edge (FPDF convention for footers).
        self.set_y(-15)
        self.set_font('Arial', 'I', 8)
        self.cell(0, 10, f'Page {self.page_no()}', 0, 0, 'C')
14
+
15
def create_interview_guide_pdf(candidate_name, questions):
    """Render the interview-guide question list to PDF bytes.

    questions: list of dicts with optional keys 'u_id', 'category',
    'complexity', 'question', 'likely_answer' (missing keys get defaults).
    """
    pdf = InterviewGuidePDF()
    pdf.add_page()

    # Candidate Info
    pdf.set_font('Arial', 'B', 12)
    pdf.cell(0, 10, f"Candidate: {candidate_name}", 0, 1)
    pdf.ln(5)

    # Questions
    pdf.set_font('Arial', '', 11)

    for q in questions:
        # Manual page-break heuristic: start a fresh page near the bottom
        # (y > 250mm on A4) so a question block isn't split in a weird spot.
        if pdf.get_y() > 250:
            pdf.add_page()

        # Question Header
        q_id = q.get('u_id', '?')
        cat = q.get('category', 'General')
        comp = q.get('complexity', 'Medium')

        pdf.set_font('Arial', 'B', 11)
        pdf.multi_cell(0, 7, f"Q{q_id} [{cat} - {comp}]")

        # Question Body
        pdf.set_font('Arial', '', 11)
        pdf.multi_cell(0, 7, q.get('question', ''))
        pdf.ln(2)

        # Answer Key
        pdf.set_font('Arial', 'I', 10)
        pdf.set_text_color(100, 100, 100)  # Gray for answers
        pdf.multi_cell(0, 7, f"Likely Answer: {q.get('likely_answer', '')}")
        pdf.set_text_color(0, 0, 0)  # Reset to black

        pdf.ln(5)  # Spacing between questions

    # NOTE(review): output(dest='S') returning a str is the classic PyFPDF
    # API; fpdf2 returns a bytearray (where .encode would fail) — confirm
    # which fpdf package is pinned. Non-latin-1 characters are replaced.
    return pdf.output(dest='S').encode('latin-1', 'replace')
src/interview-assistant/rag/db.py ADDED
@@ -0,0 +1,94 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import chromadb
2
+ import uuid
3
+ import os
4
+ import shutil
5
+
6
+ # Check if we should use OpenAI embeddings (optional, sticking to default for now for ease of setup)
7
+ # from chromadb.utils import embedding_functions
8
+
9
+ # Define Base Directory (src/interview-assistant/data)
10
+ BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
11
+ DATA_DIR = os.path.join(BASE_DIR, "data")
12
+ DB_DIR = os.path.join(DATA_DIR, "interview_rag_db")
13
+ COLLECTION_NAME = "candidates"
14
+
15
class RAGDatabase:
    """Thin wrapper around a persistent ChromaDB collection of resume chunks."""

    def __init__(self):
        self.client = chromadb.PersistentClient(path=DB_DIR)

        # Using default embedding function (Sentence Transformers).
        # This requires 'sentence-transformers' installed.
        self.collection = self.client.get_or_create_collection(name=COLLECTION_NAME)

    def add_documents(self, processed_docs):
        """
        Adds processed documents to the vector store.
        processed_docs: List of {'text': str, 'metadata': dict}
        """
        ids, documents, metadatas = [], [], []

        for doc in processed_docs:
            full_text = doc['text']
            base_metadata = doc['metadata']

            # Fixed-size character chunking: 1000 chars, 200 overlap
            # (i.e. each window starts 800 chars after the previous one).
            chunk_size = 1000
            overlap = 200

            for start in range(0, len(full_text), chunk_size - overlap):
                chunk = full_text[start:start + chunk_size]
                if len(chunk) < 50:
                    # Skip tiny trailing fragments — too short to embed usefully.
                    continue

                chunk_metadata = base_metadata.copy()
                chunk_metadata['chunk_index'] = start

                ids.append(str(uuid.uuid4()))
                documents.append(chunk)
                metadatas.append(chunk_metadata)

        if not documents:
            return False
        self.collection.add(
            documents=documents,
            metadatas=metadatas,
            ids=ids
        )
        return True

    def query(self, query_text, candidate_name=None, n_results=5):
        """
        Query the database.
        If candidate_name is provided, filters results to that candidate.
        """
        where_filter = {"candidate_name": candidate_name} if candidate_name else None
        return self.collection.query(
            query_texts=[query_text],
            n_results=n_results,
            where=where_filter
        )

    def reset(self):
        """
        Deletes the collection and recreates it.
        """
        self.client.delete_collection(COLLECTION_NAME)
        self.collection = self.client.get_or_create_collection(name=COLLECTION_NAME)
        # Verify empty
        print(f"Collection {COLLECTION_NAME} reset. Count: {self.collection.count()}")
86
+
87
# Singleton instance access: the Chroma client is created lazily and shared
# process-wide so repeated calls reuse one connection.
_db_instance = None

def get_db():
    """Return the shared RAGDatabase, constructing it on first use."""
    global _db_instance
    if _db_instance is None:
        _db_instance = RAGDatabase()
    return _db_instance
src/interview-assistant/rag/ingest.py ADDED
@@ -0,0 +1,53 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import io
2
+ import re
3
+ from pypdf import PdfReader
4
+
5
def extract_text_from_pdf(file_bytes):
    """
    Extract text from a PDF file object (e.g. BytesIO or an uploaded file).

    Returns the concatenated text of all pages, or "" if the PDF cannot
    be read at all.
    """
    try:
        reader = PdfReader(file_bytes)
        pages_text = []
        for page in reader.pages:
            # BUGFIX: extract_text() may return None (e.g. image-only pages);
            # previously `None + "\n"` raised TypeError and the broad except
            # discarded the whole document. Treat such pages as empty instead.
            pages_text.append((page.extract_text() or "") + "\n")
        return "".join(pages_text)
    except Exception as e:
        # Best-effort: an unreadable/corrupt file yields empty text.
        print(f"Error reading PDF: {e}")
        return ""
18
+
19
def clean_text(text):
    """
    Normalize whitespace: collapse any run of whitespace to a single space
    and trim both ends.
    """
    collapsed = re.sub(r'\s+', ' ', text)
    return collapsed.strip()
25
+
26
def process_uploaded_files(uploaded_files):
    """
    Turn a list of Streamlit UploadedFile objects into RAG-ready documents.

    Returns a list of dicts: {'text': str, 'metadata': dict}. Files that
    yield no extractable text are skipped.
    """
    processed_docs = []

    for uploaded_file in uploaded_files:
        # UploadedFile is already file-like, so it can be handed straight
        # to the PDF reader.
        raw_text = extract_text_from_pdf(uploaded_file)
        if not raw_text:
            continue

        # Heuristic: the name is often the first line of the resume, but the
        # filename is a safer unique identifier.
        candidate_name = (
            uploaded_file.name.replace(".pdf", "").replace("_", " ").title()
        )

        processed_docs.append({
            "text": clean_text(raw_text),
            "metadata": {
                "filename": uploaded_file.name,
                "candidate_name": candidate_name,
            },
        })

    return processed_docs
src/interview-assistant/teams/evaluation_team.py ADDED
@@ -0,0 +1,72 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from autogen_agentchat.teams import RoundRobinGroupChat
2
+ from autogen_agentchat.conditions import TextMentionTermination, MaxMessageTermination
3
+ from autogen_agentchat.agents import UserProxyAgent
4
+ from agents.definitions import (
5
+ get_evaluator,
6
+ get_coordinator
7
+ )
8
+
9
async def run_evaluation_team(candidate_name: str, job_description: str):
    """
    Run the two-agent evaluation loop (Evaluator -> Coordinator) for one
    candidate and return the final evaluation text, preferring the message
    that contains the expected JSON payload.
    """
    # PRE-OPTIMIZATION: fetch the resume context up front so the agents do
    # not spend LLM turns calling the RAG tool themselves.
    from tools.rag_tools import search_candidate_knowledge_base
    print(f"[DEBUG] Pre-fetching context for {candidate_name}...", flush=True)
    resume_context = await search_candidate_knowledge_base(f"Summary and skills for {candidate_name}", candidate_name)

    # Reduced team: JD & resume analysis are folded into the Evaluator prompt.
    evaluator = get_evaluator()
    coordinator = get_coordinator()

    # Built by concatenation so the keyword never appears verbatim in the
    # task prompt text below.
    TERMINATION_KEYWORD = "EVALUATION" + "_" + "APPROVED"

    # Stop on approval, or after 10 messages as a safety valve.
    termination = TextMentionTermination(TERMINATION_KEYWORD) | MaxMessageTermination(10)

    # Flow: Evaluator (analysis) -> Coordinator (validation) -> repeat.
    team = RoundRobinGroupChat(
        participants=[evaluator, coordinator],
        termination_condition=termination
    )

    task = f"""
    PROJECT: Candidate Evaluation
    Candidate Name: {candidate_name}

    JOB DESCRIPTION:
    {job_description}

    RESUME CONTEXT (Pre-retrieved):
    {resume_context}

    GOAL: Produce a high-quality, data-driven evaluation JSON.

    PROCESS:
    1. Evaluator: Analyze JD requirements vs Resume Context. Score (0-10) and identify Strengths/Weaknesses.
    2. Coordinator: Review against strict JSON rules.

    ITERATION RULES:
    - If Coordinator REJECTS, Evaluator must fix.
    - When satisfied, Coordinator outputs "EVALUATION", underscore, "APPROVED".
    """

    print(f"[DEBUG] Starting Evaluation Team for {candidate_name}")
    final_text = ""
    json_payload = ""

    async for message in team.run_stream(task=task):
        content = getattr(message, 'content', None)
        if not isinstance(content, str):
            continue
        print(f"[DEBUG] Agent '{message.source}' says: {content[:60]}...")
        final_text = content
        # Simple heuristic to trap the JSON payload: require keys that MUST
        # be present in a valid evaluation.
        if '"score"' in content and '"key_matches"' in content:
            json_payload = content

    print(f"[DEBUG] Evaluation Team finished. Final message length: {len(final_text)}")

    # Prefer the structured JSON message when one was captured.
    return json_payload or final_text
+ return last_json_message if last_json_message else last_message
src/interview-assistant/teams/interview_team.py ADDED
@@ -0,0 +1,103 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from autogen_agentchat.teams import RoundRobinGroupChat
2
+ from autogen_agentchat.conditions import TextMentionTermination, MaxMessageTermination
3
+ from autogen_agentchat.agents import UserProxyAgent
4
+ from agents.definitions import (
5
+ get_question_generator,
6
+ get_question_reviewer
7
+ )
8
+ from tools.rag_tools import search_candidate_knowledge_base
9
+
10
async def run_interview_generation_team(candidate_name: str, job_description: str):
    """
    Run the Generator/Reviewer loop to produce a 10-question interview
    guide. Returns the last message that looked like the JSON question
    list, falling back to the very last message.
    """
    # Pre-fetch context to simplify the agent flow: querying on the
    # candidate name yields a general overview plus strengths/weaknesses.
    print(f"[DEBUG] Pre-fetching context for {candidate_name}...", flush=True)
    resume_context = await search_candidate_knowledge_base(f"Summary and skills for {candidate_name}", candidate_name)

    # OPTIMIZATION: the Strategist agent was removed to save calls; the
    # Generator now handles both strategy and question writing.
    generator = get_question_generator()
    reviewer = get_question_reviewer()

    # Built by concatenation so the keyword never appears verbatim in the
    # task prompt text below.
    TERMINATION_KEYWORD = "GUIDE" + "_" + "APPROVED"

    # Approval keyword, or 15 messages max (fewer agents need fewer turns).
    termination = TextMentionTermination(TERMINATION_KEYWORD) | MaxMessageTermination(15)

    # Flow: Generator -> Reviewer -> repeat.
    team = RoundRobinGroupChat(
        participants=[generator, reviewer],
        termination_condition=termination
    )

    task = f"""
    PROJECT: Interview Guide Generation
    Candidate Name: {candidate_name}
    Job Description: {job_description}

    RESUME CONTEXT:
    {resume_context}

    GOAL: Create a high-quality, detailed Interview Guide (10 Questions).

    PROCESS:
    1. Generator:
    - First, analyze Role/Seniority (e.g. Architect = System Design, Junior = Syntax) and determine Weights.
    - Then, generate 10 Detailed Questions (50-100 words each) based on that strategy. Group by Category.
    2. Reviewer: Critically review against checklist.

    ITERATION RULES:
    - If Reviewer REJECTS, Generator must rewrite.
    - Loop until quality is perfect.
    - When satisfied, output JSON + "GUIDE", underscore, "APPROVED".
    """

    print(f"[DEBUG] Starting Interview Generation Team for {candidate_name}", flush=True)

    json_payload = ""
    final_text = ""

    async for message in team.run_stream(task=task):
        content = getattr(message, 'content', None)
        if not isinstance(content, str):
            continue
        print(f"[DEBUG] Agent '{message.source}' says: {content[:60]}...", flush=True)
        final_text = content
        # Heuristic: a message containing a JSON array pattern and the word
        # "question" is treated as the questions payload.
        if "[" in content and "]" in content and "question" in content:
            json_payload = content

    print(f"[DEBUG] Interview Generation Team finished.", flush=True)

    # Prefer the JSON-looking message; otherwise fall back to the last one.
    return json_payload or final_text
+ return last_json_message if last_json_message else last_message
76
+
77
async def run_interview_revision(current_questions: str, feedback: str):
    """
    Revise an existing question list based on user feedback.

    Runs a single-agent chat (bounded at 5 messages) and returns the last
    message content, which should be the full revised JSON list.
    """
    generator = get_question_generator()
    # BUGFIX: removed an unused UserProxyAgent("User") that was constructed
    # here but never added to the team.

    # Simple single-agent chat for revision
    team = RoundRobinGroupChat(
        participants=[generator],
        termination_condition=MaxMessageTermination(5)
    )

    task = f"""
    Current Questions (JSON): {current_questions}

    User Feedback: {feedback}

    TASK: Revise the questions based on the feedback.
    Output the FULL revised list of 10 questions in the same JSON list format.
    """

    print(f"[DEBUG] Starting Revision", flush=True)
    last_message = ""
    async for message in team.run_stream(task=task):
        if hasattr(message, 'content') and isinstance(message.content, str):
            print(f"[DEBUG] Revision Agent '{message.source}': {message.content[:60]}...", flush=True)
            last_message = message.content

    return last_message
src/interview-assistant/tools/rag_tools.py ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from rag.db import get_db
2
+
3
async def search_candidate_knowledge_base(query: str, candidate_name: str) -> str:
    """Searches RAG for candidate details."""
    print(f"[DEBUG] Tool 'search_candidate_knowledge_base' called with query='{query}', candidate='{candidate_name}'")
    results = get_db().query(query, candidate_name=candidate_name, n_results=3)

    segments = results['documents'][0]
    if not segments:
        print(f"[DEBUG] Tool found NO results for {candidate_name}")
        return "No relevant info found."

    print(f"[DEBUG] Tool found {len(segments)} segments for {candidate_name}")
    return f"Context for {candidate_name}:\n" + "\n".join(segments)
uv.lock CHANGED
@@ -3,7 +3,7 @@ revision = 3
3
  requires-python = "==3.12.*"
4
 
5
  [[package]]
6
- name = "agents"
7
  version = "0.1.0"
8
  source = { editable = "." }
9
  dependencies = [
@@ -11,6 +11,7 @@ dependencies = [
11
  { name = "anthropic" },
12
  { name = "arxiv" },
13
  { name = "asyncio" },
 
14
  { name = "autogen-agentchat" },
15
  { name = "autogen-ext", extra = ["grpc", "mcp", "ollama", "openai"] },
16
  { name = "azure-identity" },
@@ -23,6 +24,9 @@ dependencies = [
23
  { name = "faiss-cpu" },
24
  { name = "fastapi" },
25
  { name = "fpdf" },
 
 
 
26
  { name = "google-search-results" },
27
  { name = "html2text" },
28
  { name = "httpx" },
@@ -93,6 +97,7 @@ requires-dist = [
93
  { name = "anthropic", specifier = ">=0.49.0" },
94
  { name = "arxiv", specifier = ">=2.3.1" },
95
  { name = "asyncio" },
 
96
  { name = "autogen-agentchat", specifier = ">=0.7.5" },
97
  { name = "autogen-ext", extras = ["grpc", "mcp", "ollama", "openai"], specifier = ">=0.7.5" },
98
  { name = "azure-identity", specifier = ">=1.25.1" },
@@ -105,6 +110,9 @@ requires-dist = [
105
  { name = "faiss-cpu", specifier = ">=1.13.0" },
106
  { name = "fastapi" },
107
  { name = "fpdf" },
 
 
 
108
  { name = "google-search-results" },
109
  { name = "html2text", specifier = ">=2025.4.15" },
110
  { name = "httpx", specifier = ">=0.28.1" },
@@ -189,7 +197,7 @@ wheels = [
189
 
190
  [[package]]
191
  name = "aiohttp"
192
- version = "3.13.2"
193
  source = { registry = "https://pypi.org/simple" }
194
  dependencies = [
195
  { name = "aiohappyeyeballs" },
@@ -200,25 +208,25 @@ dependencies = [
200
  { name = "propcache" },
201
  { name = "yarl" },
202
  ]
203
- sdist = { url = "https://files.pythonhosted.org/packages/1c/ce/3b83ebba6b3207a7135e5fcaba49706f8a4b6008153b4e30540c982fae26/aiohttp-3.13.2.tar.gz", hash = "sha256:40176a52c186aefef6eb3cad2cdd30cd06e3afbe88fe8ab2af9c0b90f228daca", size = 7837994, upload-time = "2025-10-28T20:59:39.937Z" }
204
  wheels = [
205
- { url = "https://files.pythonhosted.org/packages/29/9b/01f00e9856d0a73260e86dd8ed0c2234a466c5c1712ce1c281548df39777/aiohttp-3.13.2-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b1e56bab2e12b2b9ed300218c351ee2a3d8c8fdab5b1ec6193e11a817767e47b", size = 737623, upload-time = "2025-10-28T20:56:30.797Z" },
206
- { url = "https://files.pythonhosted.org/packages/5a/1b/4be39c445e2b2bd0aab4ba736deb649fabf14f6757f405f0c9685019b9e9/aiohttp-3.13.2-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:364e25edaabd3d37b1db1f0cbcee8c73c9a3727bfa262b83e5e4cf3489a2a9dc", size = 492664, upload-time = "2025-10-28T20:56:32.708Z" },
207
- { url = "https://files.pythonhosted.org/packages/28/66/d35dcfea8050e131cdd731dff36434390479b4045a8d0b9d7111b0a968f1/aiohttp-3.13.2-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:c5c94825f744694c4b8db20b71dba9a257cd2ba8e010a803042123f3a25d50d7", size = 491808, upload-time = "2025-10-28T20:56:34.57Z" },
208
- { url = "https://files.pythonhosted.org/packages/00/29/8e4609b93e10a853b65f8291e64985de66d4f5848c5637cddc70e98f01f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:ba2715d842ffa787be87cbfce150d5e88c87a98e0b62e0f5aa489169a393dbbb", size = 1738863, upload-time = "2025-10-28T20:56:36.377Z" },
209
- { url = "https://files.pythonhosted.org/packages/9d/fa/4ebdf4adcc0def75ced1a0d2d227577cd7b1b85beb7edad85fcc87693c75/aiohttp-3.13.2-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:585542825c4bc662221fb257889e011a5aa00f1ae4d75d1d246a5225289183e3", size = 1700586, upload-time = "2025-10-28T20:56:38.034Z" },
210
- { url = "https://files.pythonhosted.org/packages/da/04/73f5f02ff348a3558763ff6abe99c223381b0bace05cd4530a0258e52597/aiohttp-3.13.2-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:39d02cb6025fe1aabca329c5632f48c9532a3dabccd859e7e2f110668972331f", size = 1768625, upload-time = "2025-10-28T20:56:39.75Z" },
211
- { url = "https://files.pythonhosted.org/packages/f8/49/a825b79ffec124317265ca7d2344a86bcffeb960743487cb11988ffb3494/aiohttp-3.13.2-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:e67446b19e014d37342f7195f592a2a948141d15a312fe0e700c2fd2f03124f6", size = 1867281, upload-time = "2025-10-28T20:56:41.471Z" },
212
- { url = "https://files.pythonhosted.org/packages/b9/48/adf56e05f81eac31edcfae45c90928f4ad50ef2e3ea72cb8376162a368f8/aiohttp-3.13.2-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:4356474ad6333e41ccefd39eae869ba15a6c5299c9c01dfdcfdd5c107be4363e", size = 1752431, upload-time = "2025-10-28T20:56:43.162Z" },
213
- { url = "https://files.pythonhosted.org/packages/30/ab/593855356eead019a74e862f21523db09c27f12fd24af72dbc3555b9bfd9/aiohttp-3.13.2-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:eeacf451c99b4525f700f078becff32c32ec327b10dcf31306a8a52d78166de7", size = 1562846, upload-time = "2025-10-28T20:56:44.85Z" },
214
- { url = "https://files.pythonhosted.org/packages/39/0f/9f3d32271aa8dc35036e9668e31870a9d3b9542dd6b3e2c8a30931cb27ae/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:d8a9b889aeabd7a4e9af0b7f4ab5ad94d42e7ff679aaec6d0db21e3b639ad58d", size = 1699606, upload-time = "2025-10-28T20:56:46.519Z" },
215
- { url = "https://files.pythonhosted.org/packages/2c/3c/52d2658c5699b6ef7692a3f7128b2d2d4d9775f2a68093f74bca06cf01e1/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:fa89cb11bc71a63b69568d5b8a25c3ca25b6d54c15f907ca1c130d72f320b76b", size = 1720663, upload-time = "2025-10-28T20:56:48.528Z" },
216
- { url = "https://files.pythonhosted.org/packages/9b/d4/8f8f3ff1fb7fb9e3f04fcad4e89d8a1cd8fc7d05de67e3de5b15b33008ff/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:8aa7c807df234f693fed0ecd507192fc97692e61fee5702cdc11155d2e5cadc8", size = 1737939, upload-time = "2025-10-28T20:56:50.77Z" },
217
- { url = "https://files.pythonhosted.org/packages/03/d3/ddd348f8a27a634daae39a1b8e291ff19c77867af438af844bf8b7e3231b/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:9eb3e33fdbe43f88c3c75fa608c25e7c47bbd80f48d012763cb67c47f39a7e16", size = 1555132, upload-time = "2025-10-28T20:56:52.568Z" },
218
- { url = "https://files.pythonhosted.org/packages/39/b8/46790692dc46218406f94374903ba47552f2f9f90dad554eed61bfb7b64c/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:9434bc0d80076138ea986833156c5a48c9c7a8abb0c96039ddbb4afc93184169", size = 1764802, upload-time = "2025-10-28T20:56:54.292Z" },
219
- { url = "https://files.pythonhosted.org/packages/ba/e4/19ce547b58ab2a385e5f0b8aa3db38674785085abcf79b6e0edd1632b12f/aiohttp-3.13.2-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:ff15c147b2ad66da1f2cbb0622313f2242d8e6e8f9b79b5206c84523a4473248", size = 1719512, upload-time = "2025-10-28T20:56:56.428Z" },
220
- { url = "https://files.pythonhosted.org/packages/70/30/6355a737fed29dcb6dfdd48682d5790cb5eab050f7b4e01f49b121d3acad/aiohttp-3.13.2-cp312-cp312-win32.whl", hash = "sha256:27e569eb9d9e95dbd55c0fc3ec3a9335defbf1d8bc1d20171a49f3c4c607b93e", size = 426690, upload-time = "2025-10-28T20:56:58.736Z" },
221
- { url = "https://files.pythonhosted.org/packages/0a/0d/b10ac09069973d112de6ef980c1f6bb31cb7dcd0bc363acbdad58f927873/aiohttp-3.13.2-cp312-cp312-win_amd64.whl", hash = "sha256:8709a0f05d59a71f33fd05c17fc11fcb8c30140506e13c2f5e8ee1b8964e1b45", size = 453465, upload-time = "2025-10-28T20:57:00.795Z" },
222
  ]
223
 
224
  [[package]]
@@ -367,6 +375,18 @@ wheels = [
367
  { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" },
368
  ]
369
 
 
 
 
 
 
 
 
 
 
 
 
 
370
  [[package]]
371
  name = "autogen-agentchat"
372
  version = "0.7.5"
@@ -427,15 +447,15 @@ openai = [
427
 
428
  [[package]]
429
  name = "azure-core"
430
- version = "1.37.0"
431
  source = { registry = "https://pypi.org/simple" }
432
  dependencies = [
433
  { name = "requests" },
434
  { name = "typing-extensions" },
435
  ]
436
- sdist = { url = "https://files.pythonhosted.org/packages/ef/83/41c9371c8298999c67b007e308a0a3c4d6a59c6908fa9c62101f031f886f/azure_core-1.37.0.tar.gz", hash = "sha256:7064f2c11e4b97f340e8e8c6d923b822978be3016e46b7bc4aa4b337cfb48aee", size = 357620, upload-time = "2025-12-11T20:05:13.518Z" }
437
  wheels = [
438
- { url = "https://files.pythonhosted.org/packages/ee/34/a9914e676971a13d6cc671b1ed172f9804b50a3a80a143ff196e52f4c7ee/azure_core-1.37.0-py3-none-any.whl", hash = "sha256:b3abe2c59e7d6bb18b38c275a5029ff80f98990e7c90a5e646249a56630fcc19", size = 214006, upload-time = "2025-12-11T20:05:14.96Z" },
439
  ]
440
 
441
  [[package]]
@@ -1038,11 +1058,11 @@ wheels = [
1038
 
1039
  [[package]]
1040
  name = "filelock"
1041
- version = "3.20.1"
1042
  source = { registry = "https://pypi.org/simple" }
1043
- sdist = { url = "https://files.pythonhosted.org/packages/a7/23/ce7a1126827cedeb958fc043d61745754464eb56c5937c35bbf2b8e26f34/filelock-3.20.1.tar.gz", hash = "sha256:b8360948b351b80f420878d8516519a2204b07aefcdcfd24912a5d33127f188c", size = 19476, upload-time = "2025-12-15T23:54:28.027Z" }
1044
  wheels = [
1045
- { url = "https://files.pythonhosted.org/packages/e3/7f/a1a97644e39e7316d850784c642093c99df1290a460df4ede27659056834/filelock-3.20.1-py3-none-any.whl", hash = "sha256:15d9e9a67306188a44baa72f569d2bfd803076269365fdea0934385da4dc361a", size = 16666, upload-time = "2025-12-15T23:54:26.874Z" },
1046
  ]
1047
 
1048
  [[package]]
@@ -1177,6 +1197,32 @@ requests = [
1177
  { name = "requests" },
1178
  ]
1179
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1180
  [[package]]
1181
  name = "google-genai"
1182
  version = "1.55.0"
@@ -1363,6 +1409,18 @@ wheels = [
1363
  { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
1364
  ]
1365
 
 
 
 
 
 
 
 
 
 
 
 
 
1366
  [[package]]
1367
  name = "httptools"
1368
  version = "0.7.1"
@@ -1726,12 +1784,11 @@ wheels = [
1726
 
1727
  [[package]]
1728
  name = "kubernetes"
1729
- version = "34.1.0"
1730
  source = { registry = "https://pypi.org/simple" }
1731
  dependencies = [
1732
  { name = "certifi" },
1733
  { name = "durationpy" },
1734
- { name = "google-auth" },
1735
  { name = "python-dateutil" },
1736
  { name = "pyyaml" },
1737
  { name = "requests" },
@@ -1740,9 +1797,9 @@ dependencies = [
1740
  { name = "urllib3" },
1741
  { name = "websocket-client" },
1742
  ]
1743
- sdist = { url = "https://files.pythonhosted.org/packages/ef/55/3f880ef65f559cbed44a9aa20d3bdbc219a2c3a3bac4a30a513029b03ee9/kubernetes-34.1.0.tar.gz", hash = "sha256:8fe8edb0b5d290a2f3ac06596b23f87c658977d46b5f8df9d0f4ea83d0003912", size = 1083771, upload-time = "2025-09-29T20:23:49.283Z" }
1744
  wheels = [
1745
- { url = "https://files.pythonhosted.org/packages/ca/ec/65f7d563aa4a62dd58777e8f6aa882f15db53b14eb29aba0c28a20f7eb26/kubernetes-34.1.0-py2.py3-none-any.whl", hash = "sha256:bffba2272534e224e6a7a74d582deb0b545b7c9879d2cd9e4aae9481d1f2cc2a", size = 2008380, upload-time = "2025-09-29T20:23:47.684Z" },
1746
  ]
1747
 
1748
  [[package]]
@@ -3727,11 +3784,11 @@ wheels = [
3727
 
3728
  [[package]]
3729
  name = "pyasn1"
3730
- version = "0.6.1"
3731
  source = { registry = "https://pypi.org/simple" }
3732
- sdist = { url = "https://files.pythonhosted.org/packages/ba/e9/01f1a64245b89f039897cb0130016d79f77d52669aae6ee7b159a6c4c018/pyasn1-0.6.1.tar.gz", hash = "sha256:6f580d2bdd84365380830acf45550f2511469f673cb4a5ae3857a3170128b034", size = 145322, upload-time = "2024-09-10T22:41:42.55Z" }
3733
  wheels = [
3734
- { url = "https://files.pythonhosted.org/packages/c8/f1/d6a797abb14f6283c0ddff96bbdd46937f64122b8c925cab503dd37f8214/pyasn1-0.6.1-py3-none-any.whl", hash = "sha256:0d632f46f2ba09143da3a8afe9e33fb6f92fa2320ab7e886e2d0f7672af84629", size = 83135, upload-time = "2024-09-11T16:00:36.122Z" },
3735
  ]
3736
 
3737
  [[package]]
@@ -3870,6 +3927,15 @@ wheels = [
3870
  { url = "https://files.pythonhosted.org/packages/9b/4d/b9add7c84060d4c1906abe9a7e5359f2a60f7a9a4f67268b2766673427d8/pyee-13.0.0-py3-none-any.whl", hash = "sha256:48195a3cddb3b1515ce0695ed76036b5ccc2ef3a9f963ff9f77aec0139845498", size = 15730, upload-time = "2025-03-17T18:53:14.532Z" },
3871
  ]
3872
 
 
 
 
 
 
 
 
 
 
3873
  [[package]]
3874
  name = "pygments"
3875
  version = "2.19.2"
@@ -3919,11 +3985,11 @@ wheels = [
3919
 
3920
  [[package]]
3921
  name = "pypdf"
3922
- version = "6.4.2"
3923
  source = { registry = "https://pypi.org/simple" }
3924
- sdist = { url = "https://files.pythonhosted.org/packages/bd/c2/b59b02ff7f2dc006799d2c5dc3a8877686890abdd915176ef799070edf17/pypdf-6.4.2.tar.gz", hash = "sha256:c466ff1272ffb4712c2348d2bbc3019bc93f1c62ccfaf50808e3b9f13c3dc527", size = 5275502, upload-time = "2025-12-14T14:30:58.58Z" }
3925
  wheels = [
3926
- { url = "https://files.pythonhosted.org/packages/38/99/3147435e15ccd97c0451efc3d13495dc22602e9887f81e64f1b135bae821/pypdf-6.4.2-py3-none-any.whl", hash = "sha256:014dcff867fd99fc0b6fc90ed1f7e1347ef2317ae038a489c2caa64106d268f4", size = 328212, upload-time = "2025-12-14T14:30:56.701Z" },
3927
  ]
3928
 
3929
  [[package]]
@@ -4876,11 +4942,11 @@ wheels = [
4876
 
4877
  [[package]]
4878
  name = "urllib3"
4879
- version = "2.3.0"
4880
  source = { registry = "https://pypi.org/simple" }
4881
- sdist = { url = "https://files.pythonhosted.org/packages/aa/63/e53da845320b757bf29ef6a9062f5c669fe997973f966045cb019c3f4b66/urllib3-2.3.0.tar.gz", hash = "sha256:f8c5449b3cf0861679ce7e0503c7b44b5ec981bec0d1d3795a07f1ba96f0204d", size = 307268, upload-time = "2024-12-22T07:47:30.032Z" }
4882
  wheels = [
4883
- { url = "https://files.pythonhosted.org/packages/c8/19/4ec628951a74043532ca2cf5d97b7b14863931476d117c471e8e2b1eb39f/urllib3-2.3.0-py3-none-any.whl", hash = "sha256:1cee9ad369867bfdbbb48b7dd50374c0967a0bb7710050facf0dd6911440e3df", size = 128369, upload-time = "2024-12-22T07:47:28.074Z" },
4884
  ]
4885
 
4886
  [[package]]
 
3
  requires-python = "==3.12.*"
4
 
5
  [[package]]
6
+ name = "agenticai"
7
  version = "0.1.0"
8
  source = { editable = "." }
9
  dependencies = [
 
11
  { name = "anthropic" },
12
  { name = "arxiv" },
13
  { name = "asyncio" },
14
+ { name = "autoflake" },
15
  { name = "autogen-agentchat" },
16
  { name = "autogen-ext", extra = ["grpc", "mcp", "ollama", "openai"] },
17
  { name = "azure-identity" },
 
24
  { name = "faiss-cpu" },
25
  { name = "fastapi" },
26
  { name = "fpdf" },
27
+ { name = "google-auth" },
28
+ { name = "google-auth-httplib2" },
29
+ { name = "google-auth-oauthlib" },
30
  { name = "google-search-results" },
31
  { name = "html2text" },
32
  { name = "httpx" },
 
97
  { name = "anthropic", specifier = ">=0.49.0" },
98
  { name = "arxiv", specifier = ">=2.3.1" },
99
  { name = "asyncio" },
100
+ { name = "autoflake", specifier = ">=1.5.0" },
101
  { name = "autogen-agentchat", specifier = ">=0.7.5" },
102
  { name = "autogen-ext", extras = ["grpc", "mcp", "ollama", "openai"], specifier = ">=0.7.5" },
103
  { name = "azure-identity", specifier = ">=1.25.1" },
 
110
  { name = "faiss-cpu", specifier = ">=1.13.0" },
111
  { name = "fastapi" },
112
  { name = "fpdf" },
113
+ { name = "google-auth", specifier = ">=2.22.0" },
114
+ { name = "google-auth-httplib2", specifier = ">=0.1.0" },
115
+ { name = "google-auth-oauthlib", specifier = ">=0.4.6" },
116
  { name = "google-search-results" },
117
  { name = "html2text", specifier = ">=2025.4.15" },
118
  { name = "httpx", specifier = ">=0.28.1" },
 
197
 
198
  [[package]]
199
  name = "aiohttp"
200
+ version = "3.13.3"
201
  source = { registry = "https://pypi.org/simple" }
202
  dependencies = [
203
  { name = "aiohappyeyeballs" },
 
208
  { name = "propcache" },
209
  { name = "yarl" },
210
  ]
211
+ sdist = { url = "https://files.pythonhosted.org/packages/50/42/32cf8e7704ceb4481406eb87161349abb46a57fee3f008ba9cb610968646/aiohttp-3.13.3.tar.gz", hash = "sha256:a949eee43d3782f2daae4f4a2819b2cb9b0c5d3b7f7a927067cc84dafdbb9f88", size = 7844556, upload-time = "2026-01-03T17:33:05.204Z" }
212
  wheels = [
213
+ { url = "https://files.pythonhosted.org/packages/a0/be/4fc11f202955a69e0db803a12a062b8379c970c7c84f4882b6da17337cc1/aiohttp-3.13.3-cp312-cp312-macosx_10_13_universal2.whl", hash = "sha256:b903a4dfee7d347e2d87697d0713be59e0b87925be030c9178c5faa58ea58d5c", size = 739732, upload-time = "2026-01-03T17:30:14.23Z" },
214
+ { url = "https://files.pythonhosted.org/packages/97/2c/621d5b851f94fa0bb7430d6089b3aa970a9d9b75196bc93bb624b0db237a/aiohttp-3.13.3-cp312-cp312-macosx_10_13_x86_64.whl", hash = "sha256:a45530014d7a1e09f4a55f4f43097ba0fd155089372e105e4bff4ca76cb1b168", size = 494293, upload-time = "2026-01-03T17:30:15.96Z" },
215
+ { url = "https://files.pythonhosted.org/packages/5d/43/4be01406b78e1be8320bb8316dc9c42dbab553d281c40364e0f862d5661c/aiohttp-3.13.3-cp312-cp312-macosx_11_0_arm64.whl", hash = "sha256:27234ef6d85c914f9efeb77ff616dbf4ad2380be0cda40b4db086ffc7ddd1b7d", size = 493533, upload-time = "2026-01-03T17:30:17.431Z" },
216
+ { url = "https://files.pythonhosted.org/packages/8d/a8/5a35dc56a06a2c90d4742cbf35294396907027f80eea696637945a106f25/aiohttp-3.13.3-cp312-cp312-manylinux2014_aarch64.manylinux_2_17_aarch64.manylinux_2_28_aarch64.whl", hash = "sha256:d32764c6c9aafb7fb55366a224756387cd50bfa720f32b88e0e6fa45b27dcf29", size = 1737839, upload-time = "2026-01-03T17:30:19.422Z" },
217
+ { url = "https://files.pythonhosted.org/packages/bf/62/4b9eeb331da56530bf2e198a297e5303e1c1ebdceeb00fe9b568a65c5a0c/aiohttp-3.13.3-cp312-cp312-manylinux2014_armv7l.manylinux_2_17_armv7l.manylinux_2_31_armv7l.whl", hash = "sha256:b1a6102b4d3ebc07dad44fbf07b45bb600300f15b552ddf1851b5390202ea2e3", size = 1703932, upload-time = "2026-01-03T17:30:21.756Z" },
218
+ { url = "https://files.pythonhosted.org/packages/7c/f6/af16887b5d419e6a367095994c0b1332d154f647e7dc2bd50e61876e8e3d/aiohttp-3.13.3-cp312-cp312-manylinux2014_ppc64le.manylinux_2_17_ppc64le.manylinux_2_28_ppc64le.whl", hash = "sha256:c014c7ea7fb775dd015b2d3137378b7be0249a448a1612268b5a90c2d81de04d", size = 1771906, upload-time = "2026-01-03T17:30:23.932Z" },
219
+ { url = "https://files.pythonhosted.org/packages/ce/83/397c634b1bcc24292fa1e0c7822800f9f6569e32934bdeef09dae7992dfb/aiohttp-3.13.3-cp312-cp312-manylinux2014_s390x.manylinux_2_17_s390x.manylinux_2_28_s390x.whl", hash = "sha256:2b8d8ddba8f95ba17582226f80e2de99c7a7948e66490ef8d947e272a93e9463", size = 1871020, upload-time = "2026-01-03T17:30:26Z" },
220
+ { url = "https://files.pythonhosted.org/packages/86/f6/a62cbbf13f0ac80a70f71b1672feba90fdb21fd7abd8dbf25c0105fb6fa3/aiohttp-3.13.3-cp312-cp312-manylinux2014_x86_64.manylinux_2_17_x86_64.manylinux_2_28_x86_64.whl", hash = "sha256:9ae8dd55c8e6c4257eae3a20fd2c8f41edaea5992ed67156642493b8daf3cecc", size = 1755181, upload-time = "2026-01-03T17:30:27.554Z" },
221
+ { url = "https://files.pythonhosted.org/packages/0a/87/20a35ad487efdd3fba93d5843efdfaa62d2f1479eaafa7453398a44faf13/aiohttp-3.13.3-cp312-cp312-manylinux_2_31_riscv64.manylinux_2_39_riscv64.whl", hash = "sha256:01ad2529d4b5035578f5081606a465f3b814c542882804e2e8cda61adf5c71bf", size = 1561794, upload-time = "2026-01-03T17:30:29.254Z" },
222
+ { url = "https://files.pythonhosted.org/packages/de/95/8fd69a66682012f6716e1bc09ef8a1a2a91922c5725cb904689f112309c4/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_aarch64.whl", hash = "sha256:bb4f7475e359992b580559e008c598091c45b5088f28614e855e42d39c2f1033", size = 1697900, upload-time = "2026-01-03T17:30:31.033Z" },
223
+ { url = "https://files.pythonhosted.org/packages/e5/66/7b94b3b5ba70e955ff597672dad1691333080e37f50280178967aff68657/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_armv7l.whl", hash = "sha256:c19b90316ad3b24c69cd78d5c9b4f3aa4497643685901185b65166293d36a00f", size = 1728239, upload-time = "2026-01-03T17:30:32.703Z" },
224
+ { url = "https://files.pythonhosted.org/packages/47/71/6f72f77f9f7d74719692ab65a2a0252584bf8d5f301e2ecb4c0da734530a/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_ppc64le.whl", hash = "sha256:96d604498a7c782cb15a51c406acaea70d8c027ee6b90c569baa6e7b93073679", size = 1740527, upload-time = "2026-01-03T17:30:34.695Z" },
225
+ { url = "https://files.pythonhosted.org/packages/fa/b4/75ec16cbbd5c01bdaf4a05b19e103e78d7ce1ef7c80867eb0ace42ff4488/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_riscv64.whl", hash = "sha256:084911a532763e9d3dd95adf78a78f4096cd5f58cdc18e6fdbc1b58417a45423", size = 1554489, upload-time = "2026-01-03T17:30:36.864Z" },
226
+ { url = "https://files.pythonhosted.org/packages/52/8f/bc518c0eea29f8406dcf7ed1f96c9b48e3bc3995a96159b3fc11f9e08321/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_s390x.whl", hash = "sha256:7a4a94eb787e606d0a09404b9c38c113d3b099d508021faa615d70a0131907ce", size = 1767852, upload-time = "2026-01-03T17:30:39.433Z" },
227
+ { url = "https://files.pythonhosted.org/packages/9d/f2/a07a75173124f31f11ea6f863dc44e6f09afe2bca45dd4e64979490deab1/aiohttp-3.13.3-cp312-cp312-musllinux_1_2_x86_64.whl", hash = "sha256:87797e645d9d8e222e04160ee32aa06bc5c163e8499f24db719e7852ec23093a", size = 1722379, upload-time = "2026-01-03T17:30:41.081Z" },
228
+ { url = "https://files.pythonhosted.org/packages/3c/4a/1a3fee7c21350cac78e5c5cef711bac1b94feca07399f3d406972e2d8fcd/aiohttp-3.13.3-cp312-cp312-win32.whl", hash = "sha256:b04be762396457bef43f3597c991e192ee7da460a4953d7e647ee4b1c28e7046", size = 428253, upload-time = "2026-01-03T17:30:42.644Z" },
229
+ { url = "https://files.pythonhosted.org/packages/d9/b7/76175c7cb4eb73d91ad63c34e29fc4f77c9386bba4a65b53ba8e05ee3c39/aiohttp-3.13.3-cp312-cp312-win_amd64.whl", hash = "sha256:e3531d63d3bdfa7e3ac5e9b27b2dd7ec9df3206a98e0b3445fa906f233264c57", size = 455407, upload-time = "2026-01-03T17:30:44.195Z" },
230
  ]
231
 
232
  [[package]]
 
375
  { url = "https://files.pythonhosted.org/packages/3a/2a/7cc015f5b9f5db42b7d48157e23356022889fc354a2813c15934b7cb5c0e/attrs-25.4.0-py3-none-any.whl", hash = "sha256:adcf7e2a1fb3b36ac48d97835bb6d8ade15b8dcce26aba8bf1d14847b57a3373", size = 67615, upload-time = "2025-10-06T13:54:43.17Z" },
376
  ]
377
 
378
+ [[package]]
379
+ name = "autoflake"
380
+ version = "2.3.1"
381
+ source = { registry = "https://pypi.org/simple" }
382
+ dependencies = [
383
+ { name = "pyflakes" },
384
+ ]
385
+ sdist = { url = "https://files.pythonhosted.org/packages/2a/cb/486f912d6171bc5748c311a2984a301f4e2d054833a1da78485866c71522/autoflake-2.3.1.tar.gz", hash = "sha256:c98b75dc5b0a86459c4f01a1d32ac7eb4338ec4317a4469515ff1e687ecd909e", size = 27642, upload-time = "2024-03-13T03:41:28.977Z" }
386
+ wheels = [
387
+ { url = "https://files.pythonhosted.org/packages/a2/ee/3fd29bf416eb4f1c5579cf12bf393ae954099258abd7bde03c4f9716ef6b/autoflake-2.3.1-py3-none-any.whl", hash = "sha256:3ae7495db9084b7b32818b4140e6dc4fc280b712fb414f5b8fe57b0a8e85a840", size = 32483, upload-time = "2024-03-13T03:41:26.969Z" },
388
+ ]
389
+
390
  [[package]]
391
  name = "autogen-agentchat"
392
  version = "0.7.5"
 
447
 
448
  [[package]]
449
  name = "azure-core"
450
+ version = "1.38.0"
451
  source = { registry = "https://pypi.org/simple" }
452
  dependencies = [
453
  { name = "requests" },
454
  { name = "typing-extensions" },
455
  ]
456
+ sdist = { url = "https://files.pythonhosted.org/packages/dc/1b/e503e08e755ea94e7d3419c9242315f888fc664211c90d032e40479022bf/azure_core-1.38.0.tar.gz", hash = "sha256:8194d2682245a3e4e3151a667c686464c3786fed7918b394d035bdcd61bb5993", size = 363033, upload-time = "2026-01-12T17:03:05.535Z" }
457
  wheels = [
458
+ { url = "https://files.pythonhosted.org/packages/fc/d8/b8fcba9464f02b121f39de2db2bf57f0b216fe11d014513d666e8634380d/azure_core-1.38.0-py3-none-any.whl", hash = "sha256:ab0c9b2cd71fecb1842d52c965c95285d3cfb38902f6766e4a471f1cd8905335", size = 217825, upload-time = "2026-01-12T17:03:07.291Z" },
459
  ]
460
 
461
  [[package]]
 
1058
 
1059
  [[package]]
1060
  name = "filelock"
1061
+ version = "3.20.3"
1062
  source = { registry = "https://pypi.org/simple" }
1063
+ sdist = { url = "https://files.pythonhosted.org/packages/1d/65/ce7f1b70157833bf3cb851b556a37d4547ceafc158aa9b34b36782f23696/filelock-3.20.3.tar.gz", hash = "sha256:18c57ee915c7ec61cff0ecf7f0f869936c7c30191bb0cf406f1341778d0834e1", size = 19485, upload-time = "2026-01-09T17:55:05.421Z" }
1064
  wheels = [
1065
+ { url = "https://files.pythonhosted.org/packages/b5/36/7fb70f04bf00bc646cd5bb45aa9eddb15e19437a28b8fb2b4a5249fac770/filelock-3.20.3-py3-none-any.whl", hash = "sha256:4b0dda527ee31078689fc205ec4f1c1bf7d56cf88b6dc9426c4f230e46c2dce1", size = 16701, upload-time = "2026-01-09T17:55:04.334Z" },
1066
  ]
1067
 
1068
  [[package]]
 
1197
  { name = "requests" },
1198
  ]
1199
 
1200
+ [[package]]
1201
+ name = "google-auth-httplib2"
1202
+ version = "0.3.0"
1203
+ source = { registry = "https://pypi.org/simple" }
1204
+ dependencies = [
1205
+ { name = "google-auth" },
1206
+ { name = "httplib2" },
1207
+ ]
1208
+ sdist = { url = "https://files.pythonhosted.org/packages/d5/ad/c1f2b1175096a8d04cf202ad5ea6065f108d26be6fc7215876bde4a7981d/google_auth_httplib2-0.3.0.tar.gz", hash = "sha256:177898a0175252480d5ed916aeea183c2df87c1f9c26705d74ae6b951c268b0b", size = 11134, upload-time = "2025-12-15T22:13:51.825Z" }
1209
+ wheels = [
1210
+ { url = "https://files.pythonhosted.org/packages/99/d5/3c97526c8796d3caf5f4b3bed2b05e8a7102326f00a334e7a438237f3b22/google_auth_httplib2-0.3.0-py3-none-any.whl", hash = "sha256:426167e5df066e3f5a0fc7ea18768c08e7296046594ce4c8c409c2457dd1f776", size = 9529, upload-time = "2025-12-15T22:13:51.048Z" },
1211
+ ]
1212
+
1213
+ [[package]]
1214
+ name = "google-auth-oauthlib"
1215
+ version = "1.2.2"
1216
+ source = { registry = "https://pypi.org/simple" }
1217
+ dependencies = [
1218
+ { name = "google-auth" },
1219
+ { name = "requests-oauthlib" },
1220
+ ]
1221
+ sdist = { url = "https://files.pythonhosted.org/packages/fb/87/e10bf24f7bcffc1421b84d6f9c3377c30ec305d082cd737ddaa6d8f77f7c/google_auth_oauthlib-1.2.2.tar.gz", hash = "sha256:11046fb8d3348b296302dd939ace8af0a724042e8029c1b872d87fabc9f41684", size = 20955, upload-time = "2025-04-22T16:40:29.172Z" }
1222
+ wheels = [
1223
+ { url = "https://files.pythonhosted.org/packages/ac/84/40ee070be95771acd2f4418981edb834979424565c3eec3cd88b6aa09d24/google_auth_oauthlib-1.2.2-py3-none-any.whl", hash = "sha256:fd619506f4b3908b5df17b65f39ca8d66ea56986e5472eb5978fd8f3786f00a2", size = 19072, upload-time = "2025-04-22T16:40:28.174Z" },
1224
+ ]
1225
+
1226
  [[package]]
1227
  name = "google-genai"
1228
  version = "1.55.0"
 
1409
  { url = "https://files.pythonhosted.org/packages/7e/f5/f66802a942d491edb555dd61e3a9961140fd64c90bce1eafd741609d334d/httpcore-1.0.9-py3-none-any.whl", hash = "sha256:2d400746a40668fc9dec9810239072b40b4484b640a8c38fd654a024c7a1bf55", size = 78784, upload-time = "2025-04-24T22:06:20.566Z" },
1410
  ]
1411
 
1412
+ [[package]]
1413
+ name = "httplib2"
1414
+ version = "0.31.0"
1415
+ source = { registry = "https://pypi.org/simple" }
1416
+ dependencies = [
1417
+ { name = "pyparsing" },
1418
+ ]
1419
+ sdist = { url = "https://files.pythonhosted.org/packages/52/77/6653db69c1f7ecfe5e3f9726fdadc981794656fcd7d98c4209fecfea9993/httplib2-0.31.0.tar.gz", hash = "sha256:ac7ab497c50975147d4f7b1ade44becc7df2f8954d42b38b3d69c515f531135c", size = 250759, upload-time = "2025-09-11T12:16:03.403Z" }
1420
+ wheels = [
1421
+ { url = "https://files.pythonhosted.org/packages/8c/a2/0d269db0f6163be503775dc8b6a6fa15820cc9fdc866f6ba608d86b721f2/httplib2-0.31.0-py3-none-any.whl", hash = "sha256:b9cd78abea9b4e43a7714c6e0f8b6b8561a6fc1e95d5dbd367f5bf0ef35f5d24", size = 91148, upload-time = "2025-09-11T12:16:01.803Z" },
1422
+ ]
1423
+
1424
  [[package]]
1425
  name = "httptools"
1426
  version = "0.7.1"
 
1784
 
1785
  [[package]]
1786
  name = "kubernetes"
1787
+ version = "35.0.0"
1788
  source = { registry = "https://pypi.org/simple" }
1789
  dependencies = [
1790
  { name = "certifi" },
1791
  { name = "durationpy" },
 
1792
  { name = "python-dateutil" },
1793
  { name = "pyyaml" },
1794
  { name = "requests" },
 
1797
  { name = "urllib3" },
1798
  { name = "websocket-client" },
1799
  ]
1800
+ sdist = { url = "https://files.pythonhosted.org/packages/2c/8f/85bf51ad4150f64e8c665daf0d9dfe9787ae92005efb9a4d1cba592bd79d/kubernetes-35.0.0.tar.gz", hash = "sha256:3d00d344944239821458b9efd484d6df9f011da367ecb155dadf9513f05f09ee", size = 1094642, upload-time = "2026-01-16T01:05:27.76Z" }
1801
  wheels = [
1802
+ { url = "https://files.pythonhosted.org/packages/0c/70/05b685ea2dffcb2adbf3cdcea5d8865b7bc66f67249084cf845012a0ff13/kubernetes-35.0.0-py2.py3-none-any.whl", hash = "sha256:39e2b33b46e5834ef6c3985ebfe2047ab39135d41de51ce7641a7ca5b372a13d", size = 2017602, upload-time = "2026-01-16T01:05:25.991Z" },
1803
  ]
1804
 
1805
  [[package]]
 
3784
 
3785
  [[package]]
3786
  name = "pyasn1"
3787
+ version = "0.6.2"
3788
  source = { registry = "https://pypi.org/simple" }
3789
+ sdist = { url = "https://files.pythonhosted.org/packages/fe/b6/6e630dff89739fcd427e3f72b3d905ce0acb85a45d4ec3e2678718a3487f/pyasn1-0.6.2.tar.gz", hash = "sha256:9b59a2b25ba7e4f8197db7686c09fb33e658b98339fadb826e9512629017833b", size = 146586, upload-time = "2026-01-16T18:04:18.534Z" }
3790
  wheels = [
3791
+ { url = "https://files.pythonhosted.org/packages/44/b5/a96872e5184f354da9c84ae119971a0a4c221fe9b27a4d94bd43f2596727/pyasn1-0.6.2-py3-none-any.whl", hash = "sha256:1eb26d860996a18e9b6ed05e7aae0e9fc21619fcee6af91cca9bad4fbea224bf", size = 83371, upload-time = "2026-01-16T18:04:17.174Z" },
3792
  ]
3793
 
3794
  [[package]]
 
3927
  { url = "https://files.pythonhosted.org/packages/9b/4d/b9add7c84060d4c1906abe9a7e5359f2a60f7a9a4f67268b2766673427d8/pyee-13.0.0-py3-none-any.whl", hash = "sha256:48195a3cddb3b1515ce0695ed76036b5ccc2ef3a9f963ff9f77aec0139845498", size = 15730, upload-time = "2025-03-17T18:53:14.532Z" },
3928
  ]
3929
 
3930
+ [[package]]
3931
+ name = "pyflakes"
3932
+ version = "3.4.0"
3933
+ source = { registry = "https://pypi.org/simple" }
3934
+ sdist = { url = "https://files.pythonhosted.org/packages/45/dc/fd034dc20b4b264b3d015808458391acbf9df40b1e54750ef175d39180b1/pyflakes-3.4.0.tar.gz", hash = "sha256:b24f96fafb7d2ab0ec5075b7350b3d2d2218eab42003821c06344973d3ea2f58", size = 64669, upload-time = "2025-06-20T18:45:27.834Z" }
3935
+ wheels = [
3936
+ { url = "https://files.pythonhosted.org/packages/c2/2f/81d580a0fb83baeb066698975cb14a618bdbed7720678566f1b046a95fe8/pyflakes-3.4.0-py2.py3-none-any.whl", hash = "sha256:f742a7dbd0d9cb9ea41e9a24a918996e8170c799fa528688d40dd582c8265f4f", size = 63551, upload-time = "2025-06-20T18:45:26.937Z" },
3937
+ ]
3938
+
3939
  [[package]]
3940
  name = "pygments"
3941
  version = "2.19.2"
 
3985
 
3986
  [[package]]
3987
  name = "pypdf"
3988
+ version = "6.6.0"
3989
  source = { registry = "https://pypi.org/simple" }
3990
+ sdist = { url = "https://files.pythonhosted.org/packages/d8/f4/801632a8b62a805378b6af2b5a3fcbfd8923abf647e0ed1af846a83433b2/pypdf-6.6.0.tar.gz", hash = "sha256:4c887ef2ea38d86faded61141995a3c7d068c9d6ae8477be7ae5de8a8e16592f", size = 5281063, upload-time = "2026-01-09T11:20:11.786Z" }
3991
  wheels = [
3992
+ { url = "https://files.pythonhosted.org/packages/b2/ba/96f99276194f720e74ed99905a080f6e77810558874e8935e580331b46de/pypdf-6.6.0-py3-none-any.whl", hash = "sha256:bca9091ef6de36c7b1a81e09327c554b7ce51e88dad68f5890c2b4a4417f1fd7", size = 328963, upload-time = "2026-01-09T11:20:09.278Z" },
3993
  ]
3994
 
3995
  [[package]]
 
4942
 
4943
  [[package]]
4944
  name = "urllib3"
4945
+ version = "2.6.3"
4946
  source = { registry = "https://pypi.org/simple" }
4947
+ sdist = { url = "https://files.pythonhosted.org/packages/c7/24/5f1b3bdffd70275f6661c76461e25f024d5a38a46f04aaca912426a2b1d3/urllib3-2.6.3.tar.gz", hash = "sha256:1b62b6884944a57dbe321509ab94fd4d3b307075e0c2eae991ac71ee15ad38ed", size = 435556, upload-time = "2026-01-07T16:24:43.925Z" }
4948
  wheels = [
4949
+ { url = "https://files.pythonhosted.org/packages/39/08/aaaad47bc4e9dc8c725e68f9d04865dbcb2052843ff09c97b08904852d84/urllib3-2.6.3-py3-none-any.whl", hash = "sha256:bf272323e553dfb2e87d9bfd225ca7b0f467b919d7bbd355436d3fd37cb0acd4", size = 131584, upload-time = "2026-01-07T16:24:42.685Z" },
4950
  ]
4951
 
4952
  [[package]]