Sajil Awale commited on
Commit
4260a62
·
1 Parent(s): 51fdd6e

solved Mermaid in Streamlit issue by rendering image first

Browse files
Files changed (6) hide show
  1. README.md +2 -2
  2. app.py +69 -7
  3. docs/flowchart.mmd +90 -0
  4. notebooks/9_test_mermaid.ipynb +375 -0
  5. requirements.txt +3 -2
  6. sync_docs.py +38 -0
README.md CHANGED
@@ -137,8 +137,8 @@ The easiest way to run the application is using Docker, as it handles the comple
137
 
138
  1. **Clone the repository**
139
  ```bash
140
- git clone https://github.com/yourusername/resumer.git
141
- cd resumer
142
  ```
143
 
144
  2. **Build and Run**
 
137
 
138
  1. **Clone the repository**
139
  ```bash
140
+ git clone https://github.com/AwaleSajil/resfit
141
+ cd resfit
142
  ```
143
 
144
  2. **Build and Run**
app.py CHANGED
@@ -1,10 +1,15 @@
1
  import streamlit as st
 
2
  import os
3
  import tempfile
4
  import json
 
 
 
5
  from typing import Optional
6
  from pathlib import Path
7
  import asyncio
 
8
 
9
  # API and instructor imports
10
  import instructor
@@ -122,6 +127,50 @@ def get_openai_instructor_client(api_key: str):
122
  # ============================================
123
  # UTILITY FUNCTIONS
124
  # ============================================
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
125
 
126
  def log_message(message: str):
127
  """Add message to processing log"""
@@ -216,7 +265,17 @@ def main():
216
  with col1:
217
  st.title("📄 ResFit: Resume Tailor AI")
218
  st.markdown("*Tailor your resume for any job using AI - **Preserving your Links!***")
219
- st.info("💡 **Why ResFit?** Unlike other tools, this app preserves all hyperlinks in your resume (Portfolio, LinkedIn, GitHub, etc.) while tailoring the content.")
 
 
 
 
 
 
 
 
 
 
220
 
221
  # ========== SIDEBAR: AUTHENTICATION ==========
222
  with st.sidebar:
@@ -278,7 +337,7 @@ def main():
278
  st.divider()
279
 
280
  # Authenticate button
281
- if st.button("🔓 Authenticate", use_container_width=True, type="primary"):
282
  if api_key:
283
  try:
284
  if api_provider == "Gemini":
@@ -308,13 +367,16 @@ def main():
308
  **Model:** {st.session_state.selected_model}
309
  """)
310
 
311
- if st.button("🚪 Logout", use_container_width=True):
312
  st.session_state.authenticated = False
313
  st.session_state.api_key = None
314
  st.session_state.api_provider = None
315
  st.session_state.selected_model = None
316
  st.session_state.aclient = None
317
  st.rerun()
 
 
 
318
 
319
  # ========== MAIN CONTENT ==========
320
  if not st.session_state.authenticated:
@@ -434,7 +496,7 @@ def main():
434
  st.divider()
435
 
436
  # Start processing button
437
- if st.button("🚀 Generate Tailored Resume", use_container_width=True, type="primary", key="btn_start"):
438
  # Clear processing log
439
  st.session_state.processing_log = []
440
 
@@ -525,7 +587,7 @@ def main():
525
  data=st.session_state.resume_bytes,
526
  file_name="original_resume.pdf",
527
  mime="application/pdf",
528
- use_container_width=True
529
  )
530
 
531
  with col2:
@@ -536,7 +598,7 @@ def main():
536
  data=st.session_state.tailored_resume_pdf,
537
  file_name="tailored_resume.pdf",
538
  mime="application/pdf",
539
- use_container_width=True,
540
  type="primary"
541
  )
542
 
@@ -548,7 +610,7 @@ def main():
548
  data=st.session_state.tailored_resume_tex.encode('utf-8'),
549
  file_name="tailored_resume.tex",
550
  mime="text/plain",
551
- use_container_width=True
552
  )
553
  else:
554
  st.info("LaTeX file not available")
 
1
  import streamlit as st
2
+ import streamlit.components.v1 as components
3
  import os
4
  import tempfile
5
  import json
6
+ import textwrap
7
+ import re
8
+ import ast
9
  from typing import Optional
10
  from pathlib import Path
11
  import asyncio
12
+ import requests
13
 
14
  # API and instructor imports
15
  import instructor
 
127
  # ============================================
128
  # UTILITY FUNCTIONS
129
  # ============================================
130
import base64

def mermaid_chart(code: str, height: int = 600):
    """
    Render a Mermaid.js diagram in Streamlit by fetching an SVG from mermaid.ink.

    Args:
        code: Mermaid diagram source text.
        height: Currently unused; kept for backward compatibility with callers.

    On a non-200 response the function retries once with the ``%%{init: ...}%%``
    directive stripped (the mermaid.ink service sometimes rejects it); if that
    also fails, the raw Mermaid source is shown in a code block instead.
    """
    # Normalize indentation so base64 encoding is stable regardless of how
    # the source string was embedded in the caller.
    code = textwrap.dedent(code).strip()

    def _svg_url(graph: str) -> str:
        # mermaid.ink expects the diagram URL-safe-base64 encoded in the path.
        encoded = base64.urlsafe_b64encode(graph.encode("utf8")).decode("ascii")
        return f"https://mermaid.ink/svg/{encoded}"

    try:
        # Timeout prevents the Streamlit app from hanging on a slow service.
        response = requests.get(_svg_url(code), timeout=10)
        if response.status_code == 200:
            st.image(response.text, width="stretch")
            return

        # Fallback: retry without the init block.
        code_no_init = re.sub(r'%%\{init:.*?\}%%', '', code, flags=re.DOTALL).strip()
        response_fallback = requests.get(_svg_url(code_no_init), timeout=10)
        if response_fallback.status_code == 200:
            st.image(response_fallback.text, width="stretch")
        else:
            st.error(f"Failed to render diagram (Status: {response.status_code})")
            st.code(code, language="mermaid")
    except Exception as e:
        # Best-effort rendering: never crash the page over a diagram.
        st.error(f"Error rendering diagram: {str(e)}")
        st.code(code, language="mermaid")
174
 
175
  def log_message(message: str):
176
  """Add message to processing log"""
 
265
  with col1:
266
  st.title("📄 ResFit: Resume Tailor AI")
267
  st.markdown("*Tailor your resume for any job using AI - **Preserving your Links!***")
268
+ st.info("💡 **Why ResFit?** Unlike other tools, this app preserves all hyperlinks in your resume while tailoring the content.")
269
+
270
+ with st.expander("🔄 How ResFit Works"):
271
+ # Read flowchart from file
272
+ flowchart_path = Path(__file__).parent / "docs" / "flowchart.mmd"
273
+ if flowchart_path.exists():
274
+ with open(flowchart_path, "r") as f:
275
+ flowchart_code = f.read()
276
+ mermaid_chart(flowchart_code, height=800)
277
+ else:
278
+ st.error(f"Flowchart definition not found at {flowchart_path}")
279
 
280
  # ========== SIDEBAR: AUTHENTICATION ==========
281
  with st.sidebar:
 
337
  st.divider()
338
 
339
  # Authenticate button
340
+ if st.button("🔓 Authenticate", width="stretch", type="primary"):
341
  if api_key:
342
  try:
343
  if api_provider == "Gemini":
 
367
  **Model:** {st.session_state.selected_model}
368
  """)
369
 
370
+ if st.button("🚪 Logout", width="stretch"):
371
  st.session_state.authenticated = False
372
  st.session_state.api_key = None
373
  st.session_state.api_provider = None
374
  st.session_state.selected_model = None
375
  st.session_state.aclient = None
376
  st.rerun()
377
+
378
+
379
+ st.markdown("[![GitHub](https://img.shields.io/badge/GitHub-ResFit-181717?logo=github)](https://github.com/AwaleSajil/resfit)")
380
 
381
  # ========== MAIN CONTENT ==========
382
  if not st.session_state.authenticated:
 
496
  st.divider()
497
 
498
  # Start processing button
499
+ if st.button("🚀 Generate Tailored Resume", width="stretch", type="primary", key="btn_start"):
500
  # Clear processing log
501
  st.session_state.processing_log = []
502
 
 
587
  data=st.session_state.resume_bytes,
588
  file_name="original_resume.pdf",
589
  mime="application/pdf",
590
+ width="stretch"
591
  )
592
 
593
  with col2:
 
598
  data=st.session_state.tailored_resume_pdf,
599
  file_name="tailored_resume.pdf",
600
  mime="application/pdf",
601
+ width="stretch",
602
  type="primary"
603
  )
604
 
 
610
  data=st.session_state.tailored_resume_tex.encode('utf-8'),
611
  file_name="tailored_resume.tex",
612
  mime="text/plain",
613
+ width="stretch"
614
  )
615
  else:
616
  st.info("LaTeX file not available")
docs/flowchart.mmd ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ %%{init: {
2
+ 'theme': 'base',
3
+ 'themeVariables': {
4
+ 'primaryColor': '#E1F5FE',
5
+ 'primaryTextColor': '#01579B',
6
+ 'lineColor': '#546E7A',
7
+ 'clusterBkg': '#FAFAFA',
8
+ 'clusterBorder': '#CFD8DC'
9
+ },
10
+ 'flowchart': {
11
+ 'curve': 'basis'
12
+ }
13
+ }}%%
14
+
15
+ graph TD
16
+
17
+ %% === STYLING DEFINITIONS ===
18
+ classDef user fill:#fff9c4,stroke:#fbc02d,stroke-width:2px,rx:10;
19
+ classDef ui fill:#e1f5fe,stroke:#0288d1,stroke-width:2px,rx:5;
20
+ classDef ai fill:#ffe0b2,stroke:#f57c00,stroke-width:2px,rx:10;
21
+ classDef process fill:#ffffff,stroke:#78909c,stroke-width:2px,rx:5;
22
+ classDef data fill:#e1bee7,stroke:#8e24aa,stroke-width:2px,shape:cylinder;
23
+ classDef output fill:#c8e6c9,stroke:#2e7d32,stroke-width:2px,rx:5;
24
+
25
+ %% === THE DIAGRAM ===
26
+
27
+ %% 1. USER INTERFACE LAYER
28
+ subgraph UI_Layer ["🖥️ Frontend / Interface"]
29
+ User([👤 User]):::user
30
+ Streamlit[/"💻 Streamlit UI"/]:::ui
31
+ LLM["🧠 LLM Provider<br/>(OpenAI / Gemini / Claude)"]:::ui
32
+
33
+ User -->|Uploads Files| Streamlit
34
+ Streamlit -.->|Configures| LLM
35
+ end
36
+
37
+ %% 2. THE PIPELINE (BACKEND)
38
+ %% Phase 1: Ingestion
39
+ subgraph P1 ["Phase 1: Input Processing"]
40
+ Parser["📄 Resume Parser<br/>(PyMuPDF4LLM)"]:::process
41
+ Scraper["🌐 Job Scraper<br/>(Web Engine)"]:::process
42
+ end
43
+
44
+ %% Phase 2: Understanding
45
+ subgraph P2 ["Phase 2: AI Orchestration"]
46
+ Extractor{{"🤖 Data Extractor"}}:::ai
47
+ Planner["📋 Content Planner"]:::ai
48
+
49
+ %% Connecting P1 to P2
50
+ Parser --> Extractor
51
+ Scraper --> Extractor
52
+ Extractor --> Planner
53
+ end
54
+
55
+ %% Phase 3: Writing
56
+ subgraph P3 ["Phase 3: Parallel Writing"]
57
+ Workers{{"⚡ Async Workers"}}:::ai
58
+
59
+ S1["📝 Summary"]:::process
60
+ S2["💼 Experience"]:::process
61
+ S3["🛠️ Skills"]:::process
62
+ S4["🚀 Projects"]:::process
63
+
64
+ Planner --> Workers
65
+ Workers --> S1
66
+ Workers --> S2
67
+ Workers --> S3
68
+ Workers --> S4
69
+ end
70
+
71
+ %% Phase 4: Assembly
72
+ subgraph P4 ["Phase 4: Generation"]
73
+ Merger["🔗 Jinja2 Merger"]:::process
74
+ Compiler["⚙️ PDF Compiler<br/>(LaTeX)"]:::process
75
+
76
+ S1 --> Merger
77
+ S2 --> Merger
78
+ S3 --> Merger
79
+ S4 --> Merger
80
+ Merger --> Compiler
81
+ end
82
+
83
+ %% 3. OUTPUT
84
+ Result([📄 Final PDF]):::output
85
+
86
+ %% === CROSS CONNECTIONS ===
87
+ Streamlit --> Parser
88
+ Streamlit --> Scraper
89
+
90
+ Compiler --> Result
notebooks/9_test_mermaid.ipynb ADDED
@@ -0,0 +1,375 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 2,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "%reload_ext autoreload\n",
10
+ "%autoreload 2"
11
+ ]
12
+ },
13
+ {
14
+ "cell_type": "code",
15
+ "execution_count": 3,
16
+ "metadata": {},
17
+ "outputs": [
18
+ {
19
+ "name": "stdout",
20
+ "output_type": "stream",
21
+ "text": [
22
+ "Added to path: /Users/sawale/Documents/learning/resumer\n"
23
+ ]
24
+ }
25
+ ],
26
+ "source": [
27
+ "import sys\n",
28
+ "import os\n",
29
+ "from pathlib import Path\n",
30
+ "\n",
31
+ "# Use Path.cwd() instead of __file__ in Notebooks\n",
32
+ "parent_dir = str(Path.cwd().parent)\n",
33
+ "\n",
34
+ "if parent_dir not in sys.path:\n",
35
+ " sys.path.append(parent_dir)\n",
36
+ "\n",
37
+ "print(f\"Added to path: {parent_dir}\")"
38
+ ]
39
+ },
40
+ {
41
+ "cell_type": "code",
42
+ "execution_count": 3,
43
+ "metadata": {},
44
+ "outputs": [],
45
+ "source": [
46
+ "import os\n",
47
+ "import instructor\n",
48
+ "from google import genai\n",
49
+ "from dotenv import load_dotenv\n",
50
+ "\n",
51
+ "load_dotenv()\n",
52
+ "\n",
53
+ "# 1. Initialize the GenAI Client for Google AI Studio (API Key)\n",
54
+ "# Make sure GEMINI_API_KEY is set in your .env file\n",
55
+ "native_client = genai.Client(\n",
56
+ " api_key=os.environ.get(\"GEMINI_API_KEY\")\n",
57
+ ")\n",
58
+ "\n",
59
+ "# 2. Patch the client with Instructor\n",
60
+ "# The mode remains GENAI_STRUCTURED_OUTPUTS\n",
61
+ "aclient = instructor.from_genai(\n",
62
+ " native_client, \n",
63
+ " # mode=instructor.Mode.GENAI_STRUCTURED_OUTPUTS, \n",
64
+ " mode=instructor.Mode.GENAI_TOOLS,\n",
65
+ " use_async=True\n",
66
+ ")"
67
+ ]
68
+ },
69
+ {
70
+ "cell_type": "code",
71
+ "execution_count": 4,
72
+ "metadata": {},
73
+ "outputs": [
74
+ {
75
+ "data": {
76
+ "text/plain": [
77
+ "<instructor.core.client.AsyncInstructor at 0x1357b5190>"
78
+ ]
79
+ },
80
+ "execution_count": 4,
81
+ "metadata": {},
82
+ "output_type": "execute_result"
83
+ }
84
+ ],
85
+ "source": [
86
+ "aclient"
87
+ ]
88
+ },
89
+ {
90
+ "cell_type": "code",
91
+ "execution_count": 5,
92
+ "metadata": {},
93
+ "outputs": [
94
+ {
95
+ "name": "stdout",
96
+ "output_type": "stream",
97
+ "text": [
98
+ "Consider using the pymupdf_layout package for a greatly improved page layout analysis.\n"
99
+ ]
100
+ }
101
+ ],
102
+ "source": [
103
+ "from resumer import ResumeTailorPipeline"
104
+ ]
105
+ },
106
+ {
107
+ "cell_type": "code",
108
+ "execution_count": 8,
109
+ "metadata": {},
110
+ "outputs": [],
111
+ "source": [
112
+ "pp = ResumeTailorPipeline(\n",
113
+ " aclient = aclient, \n",
114
+ " model_name = \"gemini-3-pro-preview\",\n",
115
+ " resume_path = \"/Users/sawale/Documents/learning/resumer/resumer/demo/Sajil_Awale_CV_2025.pdf\", \n",
116
+ " output_dir= \"./output/\"\n",
117
+ ")\n"
118
+ ]
119
+ },
120
+ {
121
+ "cell_type": "code",
122
+ "execution_count": null,
123
+ "metadata": {},
124
+ "outputs": [
125
+ {
126
+ "name": "stdout",
127
+ "output_type": "stream",
128
+ "text": [
129
+ "--- Scraping job details from: https://lifeattiktok.com/search/7527589557336869138 ---\n",
130
+ "--- Extracting job info via LLM ---\n",
131
+ "--- Cache miss: Extracting resume info via LLM ---\n",
132
+ "--- Successfully extracted both Resume and Job data ---\n",
133
+ "--- Adding section: summary ---\n",
134
+ "--- Adding section: work_experience ---\n",
135
+ "--- Adding section: education ---\n",
136
+ "--- Adding section: skill_sections ---\n",
137
+ "--- Adding section: projects ---\n",
138
+ "--- Adding section: certifications ---\n",
139
+ "--- Adding section: achievements ---\n",
140
+ "--- Adding section: research_works ---\n",
141
+ "## LLM decided this section is not relevant ##\n",
142
+ "--- Adding section: Exchange Program and Fellowship ---\n",
143
+ "--- Adding section: Volunteering and Teaching experience ---\n",
144
+ "--- Adding section: References ---\n",
145
+ "Running command: pdflatex -interaction=nonstopmode -output-directory=./output ./output/tailored_resume.tex\n",
146
+ "PDF generated at: ./output/tailored_resume.pdf\n"
147
+ ]
148
+ }
149
+ ],
150
+ "source": [
151
+ "await pp.generate_tailored_resume(job_url=\"https://lifeattiktok.com/search/7527589557336869138\")"
152
+ ]
153
+ },
154
+ {
155
+ "cell_type": "code",
156
+ "execution_count": null,
157
+ "metadata": {},
158
+ "outputs": [],
159
+ "source": [
160
+ "from resumer.utils.latex_ops import json_to_latex_pdf\n",
161
+ "x = json_to_latex_pdf(pp.resume_details, os.path.join(pp.output_dir, \"tailored_resume.pdf\"))"
162
+ ]
163
+ },
164
+ {
165
+ "cell_type": "code",
166
+ "execution_count": null,
167
+ "metadata": {},
168
+ "outputs": [],
169
+ "source": [
170
+ "pp.resume_details"
171
+ ]
172
+ },
173
+ {
174
+ "cell_type": "code",
175
+ "execution_count": null,
176
+ "metadata": {},
177
+ "outputs": [],
178
+ "source": [
179
+ "pp.resume_details[\"custom_sections\"].keys()"
180
+ ]
181
+ },
182
+ {
183
+ "cell_type": "code",
184
+ "execution_count": null,
185
+ "metadata": {},
186
+ "outputs": [],
187
+ "source": [
188
+ "pp.resume_details[\"custom_sections\"][\"References\"]"
189
+ ]
190
+ },
191
+ {
192
+ "cell_type": "code",
193
+ "execution_count": null,
194
+ "metadata": {},
195
+ "outputs": [],
196
+ "source": [
197
+ "pp.resume_details[\"custom_sections\"]"
198
+ ]
199
+ },
200
+ {
201
+ "cell_type": "code",
202
+ "execution_count": null,
203
+ "metadata": {},
204
+ "outputs": [],
205
+ "source": [
206
+ "pp.resume_info.model_dump()"
207
+ ]
208
+ },
209
+ {
210
+ "cell_type": "code",
211
+ "execution_count": null,
212
+ "metadata": {},
213
+ "outputs": [],
214
+ "source": [
215
+ "pp.job_info"
216
+ ]
217
+ },
218
+ {
219
+ "cell_type": "code",
220
+ "execution_count": null,
221
+ "metadata": {},
222
+ "outputs": [],
223
+ "source": [
224
+ "pp.resume_info.model_dump().keys()"
225
+ ]
226
+ },
227
+ {
228
+ "cell_type": "code",
229
+ "execution_count": null,
230
+ "metadata": {},
231
+ "outputs": [],
232
+ "source": [
233
+ "# loop through custom sections\n",
234
+ "for section in getattr(pp.resume_info, \"custom_sections\"):\n",
235
+ " temp = section.section_name\n",
236
+ " print(temp.plain_text)\n"
237
+ ]
238
+ },
239
+ {
240
+ "cell_type": "code",
241
+ "execution_count": null,
242
+ "metadata": {},
243
+ "outputs": [],
244
+ "source": [
245
+ "pp.resume_info.custom_sections[2].model_dump()"
246
+ ]
247
+ },
248
+ {
249
+ "cell_type": "code",
250
+ "execution_count": null,
251
+ "metadata": {},
252
+ "outputs": [],
253
+ "source": [
254
+ "pp.resume_info.custom_sections"
255
+ ]
256
+ },
257
+ {
258
+ "cell_type": "code",
259
+ "execution_count": null,
260
+ "metadata": {},
261
+ "outputs": [],
262
+ "source": [
263
+ "# convert the custom section to structure like other normal section\n",
264
+ "custom_output = {}\n",
265
+ "\n",
266
+ "\n",
267
+ "# loop through custom section\n",
268
+ "for csection in pp.resume_info.custom_sections:\n",
269
+ " # setting the key\n",
270
+ " key_name = csection.section_name.plain_text\n",
271
+ " custom_output[key_name] = csection.model_dump()[\"section_detail\"]\n",
272
+ " print(type(custom_output[key_name]))\n",
273
+ "\n",
274
+ "\n",
275
+ "# custom_output"
276
+ ]
277
+ },
278
+ {
279
+ "cell_type": "code",
280
+ "execution_count": null,
281
+ "metadata": {},
282
+ "outputs": [],
283
+ "source": [
284
+ "type(pp.resume_info.model_dump_json(include={\"summary\"}))"
285
+ ]
286
+ },
287
+ {
288
+ "cell_type": "code",
289
+ "execution_count": null,
290
+ "metadata": {},
291
+ "outputs": [],
292
+ "source": [
293
+ "pp.resume_info.model_dump_json(include={\"work_experience\"})"
294
+ ]
295
+ },
296
+ {
297
+ "cell_type": "code",
298
+ "execution_count": null,
299
+ "metadata": {},
300
+ "outputs": [],
301
+ "source": [
302
+ "pp.resume_info.model_dump_json(include={\"skill_sections\"})"
303
+ ]
304
+ },
305
+ {
306
+ "cell_type": "code",
307
+ "execution_count": 4,
308
+ "metadata": {},
309
+ "outputs": [
310
+ {
311
+ "ename": "AttributeError",
312
+ "evalue": "module 'google.genai' has no attribute 'configure'",
313
+ "output_type": "error",
314
+ "traceback": [
315
+ "\u001b[31m---------------------------------------------------------------------------\u001b[39m",
316
+ "\u001b[31mAttributeError\u001b[39m Traceback (most recent call last)",
317
+ "\u001b[36mCell\u001b[39m\u001b[36m \u001b[39m\u001b[32mIn[4]\u001b[39m\u001b[32m, line 10\u001b[39m\n\u001b[32m 4\u001b[39m \u001b[38;5;28;01mfrom\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[34;01mdotenv\u001b[39;00m\u001b[38;5;250m \u001b[39m\u001b[38;5;28;01mimport\u001b[39;00m load_dotenv\n\u001b[32m 6\u001b[39m load_dotenv()\n\u001b[32m---> \u001b[39m\u001b[32m10\u001b[39m \u001b[43mgenai\u001b[49m\u001b[43m.\u001b[49m\u001b[43mconfigure\u001b[49m(api_key=os.environ.get(\u001b[33m\"\u001b[39m\u001b[33mGEMINI_API_KEY\u001b[39m\u001b[33m\"\u001b[39m))\n\u001b[32m 12\u001b[39m \u001b[38;5;28;01mfor\u001b[39;00m m \u001b[38;5;129;01min\u001b[39;00m genai.list_models():\n\u001b[32m 13\u001b[39m \u001b[38;5;28;01mif\u001b[39;00m \u001b[33m'\u001b[39m\u001b[33mgenerateContent\u001b[39m\u001b[33m'\u001b[39m \u001b[38;5;129;01min\u001b[39;00m m.supported_generation_methods:\n",
318
+ "\u001b[31mAttributeError\u001b[39m: module 'google.genai' has no attribute 'configure'"
319
+ ]
320
+ }
321
+ ],
322
+ "source": [
323
+ "import os\n",
324
+ "import instructor\n",
325
+ "from google import genai\n",
326
+ "from dotenv import load_dotenv\n",
327
+ "\n",
328
+ "load_dotenv()\n",
329
+ "\n",
330
+ "\n",
331
+ "\n",
332
+ "genai.configure(api_key=os.environ.get(\"GEMINI_API_KEY\"))\n",
333
+ "\n",
334
+ "for m in genai.list_models():\n",
335
+ " if 'generateContent' in m.supported_generation_methods:\n",
336
+ " print(f\"Model Name: {m.name}\")"
337
+ ]
338
+ },
339
+ {
340
+ "cell_type": "code",
341
+ "execution_count": null,
342
+ "metadata": {},
343
+ "outputs": [],
344
+ "source": []
345
+ },
346
+ {
347
+ "cell_type": "code",
348
+ "execution_count": null,
349
+ "metadata": {},
350
+ "outputs": [],
351
+ "source": []
352
+ }
353
+ ],
354
+ "metadata": {
355
+ "kernelspec": {
356
+ "display_name": "resumer",
357
+ "language": "python",
358
+ "name": "python3"
359
+ },
360
+ "language_info": {
361
+ "codemirror_mode": {
362
+ "name": "ipython",
363
+ "version": 3
364
+ },
365
+ "file_extension": ".py",
366
+ "mimetype": "text/x-python",
367
+ "name": "python",
368
+ "nbconvert_exporter": "python",
369
+ "pygments_lexer": "ipython3",
370
+ "version": "3.12.7"
371
+ }
372
+ },
373
+ "nbformat": 4,
374
+ "nbformat_minor": 2
375
+ }
requirements.txt CHANGED
@@ -1,7 +1,7 @@
1
  google-genai
2
  anthropic
3
  openai
4
- streamlit>=1.28.0
5
  instructor>=1.0.0
6
  instructor[google-genai]
7
  google-cloud-aiplatform
@@ -12,4 +12,5 @@ requests
12
  trafilatura
13
  undetected-chromedriver
14
  jinja2
15
- python-dotenv
 
 
1
  google-genai
2
  anthropic
3
  openai
4
+ streamlit>=1.31.0
5
  instructor>=1.0.0
6
  instructor[google-genai]
7
  google-cloud-aiplatform
 
12
  trafilatura
13
  undetected-chromedriver
14
  jinja2
15
+ python-dotenv
16
+ streamlit-mermaid
sync_docs.py ADDED
@@ -0,0 +1,38 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
import re
from pathlib import Path


def sync_flowchart():
    """Copy docs/flowchart.mmd into the ```mermaid fenced block of README.md.

    The README is only rewritten when an existing mermaid block is found,
    so the script is safe to run repeatedly (idempotent). Prints a status
    message instead of raising so it can be used in loose build scripts.
    """
    mmd_path = Path("docs/flowchart.mmd")
    readme_path = Path("README.md")

    if not mmd_path.exists():
        print(f"Error: {mmd_path} not found")
        return

    if not readme_path.exists():
        print(f"Error: {readme_path} not found")
        return

    mmd_content = mmd_path.read_text(encoding="utf-8").strip()
    readme_content = readme_path.read_text(encoding="utf-8")

    # Match the first ```mermaid ... ``` fenced block in README.md.
    pattern = r"```mermaid\n(.*?)\n```"
    new_mermaid_block = f"```mermaid\n{mmd_content}\n```"

    if re.search(pattern, readme_content, re.DOTALL):
        # Use a callable replacement so backslashes / group references
        # (e.g. "\1") inside the diagram source are inserted literally.
        # A plain string replacement would interpret them as re escapes
        # and raise "invalid group reference" or corrupt the output.
        new_readme_content = re.sub(
            pattern,
            lambda _match: new_mermaid_block,
            readme_content,
            flags=re.DOTALL,
        )
        readme_path.write_text(new_readme_content, encoding="utf-8")
        print("Successfully synced flowchart to README.md")
    else:
        print("Could not find mermaid block in README.md")


if __name__ == "__main__":
    sync_flowchart()