jaothan commited on
Commit
324e9cf
·
verified ·
1 Parent(s): b076e0b

Upload 18 files

Browse files
.gitattributes CHANGED
@@ -1,35 +1,35 @@
1
- *.7z filter=lfs diff=lfs merge=lfs -text
2
- *.arrow filter=lfs diff=lfs merge=lfs -text
3
- *.bin filter=lfs diff=lfs merge=lfs -text
4
- *.bz2 filter=lfs diff=lfs merge=lfs -text
5
- *.ckpt filter=lfs diff=lfs merge=lfs -text
6
- *.ftz filter=lfs diff=lfs merge=lfs -text
7
- *.gz filter=lfs diff=lfs merge=lfs -text
8
- *.h5 filter=lfs diff=lfs merge=lfs -text
9
- *.joblib filter=lfs diff=lfs merge=lfs -text
10
- *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
- *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
- *.model filter=lfs diff=lfs merge=lfs -text
13
- *.msgpack filter=lfs diff=lfs merge=lfs -text
14
- *.npy filter=lfs diff=lfs merge=lfs -text
15
- *.npz filter=lfs diff=lfs merge=lfs -text
16
- *.onnx filter=lfs diff=lfs merge=lfs -text
17
- *.ot filter=lfs diff=lfs merge=lfs -text
18
- *.parquet filter=lfs diff=lfs merge=lfs -text
19
- *.pb filter=lfs diff=lfs merge=lfs -text
20
- *.pickle filter=lfs diff=lfs merge=lfs -text
21
- *.pkl filter=lfs diff=lfs merge=lfs -text
22
- *.pt filter=lfs diff=lfs merge=lfs -text
23
- *.pth filter=lfs diff=lfs merge=lfs -text
24
- *.rar filter=lfs diff=lfs merge=lfs -text
25
- *.safetensors filter=lfs diff=lfs merge=lfs -text
26
- saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
- *.tar.* filter=lfs diff=lfs merge=lfs -text
28
- *.tar filter=lfs diff=lfs merge=lfs -text
29
- *.tflite filter=lfs diff=lfs merge=lfs -text
30
- *.tgz filter=lfs diff=lfs merge=lfs -text
31
- *.wasm filter=lfs diff=lfs merge=lfs -text
32
- *.xz filter=lfs diff=lfs merge=lfs -text
33
- *.zip filter=lfs diff=lfs merge=lfs -text
34
- *.zst filter=lfs diff=lfs merge=lfs -text
35
- *tfevents* filter=lfs diff=lfs merge=lfs -text
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
.gradio/certificate.pem ADDED
@@ -0,0 +1,31 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ -----BEGIN CERTIFICATE-----
2
+ MIIFazCCA1OgAwIBAgIRAIIQz7DSQONZRGPgu2OCiwAwDQYJKoZIhvcNAQELBQAw
3
+ TzELMAkGA1UEBhMCVVMxKTAnBgNVBAoTIEludGVybmV0IFNlY3VyaXR5IFJlc2Vh
4
+ cmNoIEdyb3VwMRUwEwYDVQQDEwxJU1JHIFJvb3QgWDEwHhcNMTUwNjA0MTEwNDM4
5
+ WhcNMzUwNjA0MTEwNDM4WjBPMQswCQYDVQQGEwJVUzEpMCcGA1UEChMgSW50ZXJu
6
+ ZXQgU2VjdXJpdHkgUmVzZWFyY2ggR3JvdXAxFTATBgNVBAMTDElTUkcgUm9vdCBY
7
+ MTCCAiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBAK3oJHP0FDfzm54rVygc
8
+ h77ct984kIxuPOZXoHj3dcKi/vVqbvYATyjb3miGbESTtrFj/RQSa78f0uoxmyF+
9
+ 0TM8ukj13Xnfs7j/EvEhmkvBioZxaUpmZmyPfjxwv60pIgbz5MDmgK7iS4+3mX6U
10
+ A5/TR5d8mUgjU+g4rk8Kb4Mu0UlXjIB0ttov0DiNewNwIRt18jA8+o+u3dpjq+sW
11
+ T8KOEUt+zwvo/7V3LvSye0rgTBIlDHCNAymg4VMk7BPZ7hm/ELNKjD+Jo2FR3qyH
12
+ B5T0Y3HsLuJvW5iB4YlcNHlsdu87kGJ55tukmi8mxdAQ4Q7e2RCOFvu396j3x+UC
13
+ B5iPNgiV5+I3lg02dZ77DnKxHZu8A/lJBdiB3QW0KtZB6awBdpUKD9jf1b0SHzUv
14
+ KBds0pjBqAlkd25HN7rOrFleaJ1/ctaJxQZBKT5ZPt0m9STJEadao0xAH0ahmbWn
15
+ OlFuhjuefXKnEgV4We0+UXgVCwOPjdAvBbI+e0ocS3MFEvzG6uBQE3xDk3SzynTn
16
+ jh8BCNAw1FtxNrQHusEwMFxIt4I7mKZ9YIqioymCzLq9gwQbooMDQaHWBfEbwrbw
17
+ qHyGO0aoSCqI3Haadr8faqU9GY/rOPNk3sgrDQoo//fb4hVC1CLQJ13hef4Y53CI
18
+ rU7m2Ys6xt0nUW7/vGT1M0NPAgMBAAGjQjBAMA4GA1UdDwEB/wQEAwIBBjAPBgNV
19
+ HRMBAf8EBTADAQH/MB0GA1UdDgQWBBR5tFnme7bl5AFzgAiIyBpY9umbbjANBgkq
20
+ hkiG9w0BAQsFAAOCAgEAVR9YqbyyqFDQDLHYGmkgJykIrGF1XIpu+ILlaS/V9lZL
21
+ ubhzEFnTIZd+50xx+7LSYK05qAvqFyFWhfFQDlnrzuBZ6brJFe+GnY+EgPbk6ZGQ
22
+ 3BebYhtF8GaV0nxvwuo77x/Py9auJ/GpsMiu/X1+mvoiBOv/2X/qkSsisRcOj/KK
23
+ NFtY2PwByVS5uCbMiogziUwthDyC3+6WVwW6LLv3xLfHTjuCvjHIInNzktHCgKQ5
24
+ ORAzI4JMPJ+GslWYHb4phowim57iaztXOoJwTdwJx4nLCgdNbOhdjsnvzqvHu7Ur
25
+ TkXWStAmzOVyyghqpZXjFaH3pO3JLF+l+/+sKAIuvtd7u+Nxe5AW0wdeRlN8NwdC
26
+ jNPElpzVmbUq4JUagEiuTDkHzsxHpFKVK7q4+63SM1N95R1NbdWhscdCb+ZAJzVc
27
+ oyi3B43njTOQ5yOf+1CceWxG1bQVs5ZufpsMljq4Ui0/1lvh+wjChP4kqKOJ2qxq
28
+ 4RgqsahDYVvTH9w7jXbyLeiNdd8XM2w9U/t7y0Ff/9yi0GE44Za4rF2LN9d11TPA
29
+ mRGunUHBcnWEvgJBQl9nJEiU0Zsnvgc/ubhPgXRR4Xq37Z0j4r7g1SgEEzwxA57d
30
+ emyPxgcYxn/eR44/KJ4EBs+lVDR3veyJm+kXQ99b21/+jh5Xos1AnX5iItreGCc=
31
+ -----END CERTIFICATE-----
CONTRIBUTING.md ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ # Contributor Covenant Code of Conduct
2
+
3
+ ## Our Pledge
4
+ We as members, contributors, and leaders pledge to make participation in our community a harassment-free experience for everyone, regardless of age, body size, visible or invisible disability, ethnicity, sex characteristics, gender identity, education, socioeconomic status, nationality, personal appearance, race, religion, or sexual identity and orientation.
5
+
6
+ ...
7
+
8
+ ## Enforcement
9
+ Instances of abusive, harassing, or otherwise unacceptable behavior may be reported to the project team at [your-email@example.com].
Sample_Resume.docx ADDED
Binary file (8.55 kB). View file
 
app.py ADDED
@@ -0,0 +1,303 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ import os
3
+ from selenium import webdriver
4
+ from selenium.webdriver.common.by import By
5
+ from selenium.webdriver.chrome.service import Service
6
+ from webdriver_manager.chrome import ChromeDriverManager
7
+ from webdriver_manager.core.os_manager import ChromeType
8
+ from selenium.webdriver.support.ui import WebDriverWait
9
+ from selenium.webdriver.support import expected_conditions as EC
10
+ from selenium.common.exceptions import TimeoutException, NoSuchElementException
11
+ from selenium.webdriver.chrome.service import Service
12
+ from webdriver_manager.chrome import ChromeDriverManager
13
+ from openai import OpenAI
14
+ import time
15
+ from docx import Document
16
+ from dotenv import load_dotenv
17
+ import tempfile
18
+
19
+ # Load environment variables
20
+ load_dotenv(override=True)
21
+
22
+ # LinkedIn logo as an SVG string
23
+ LINKEDIN_LOGO = """
24
+ <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 24 24" width="50" height="50">
25
+ <path fill="#0A66C2" d="M20.447 20.452h-3.554v-5.569c0-1.328-.027-3.037-1.852-3.037-1.853 0-2.136 1.445-2.136 2.939v5.667H9.351V9h3.414v1.561h.046c.477-.9 1.637-1.85 3.37-1.85 3.601 0 4.267 2.37 4.267 5.455v6.286zM5.337 7.433c-1.144 0-2.063-.926-2.063-2.065 0-1.138.92-2.063 2.063-2.063 1.14 0 2.064.925 2.064 2.063 0 1.139-.925 2.065-2.064 2.065zm1.782 13.019H3.555V9h3.564v11.452zM22.225 0H1.771C.792 0 0 .774 0 1.729v20.542C0 23.227.792 24 1.771 24h20.451C23.2 24 24 23.227 24 22.271V1.729C24 .774 23.2 0 22.225 0z"/>
26
+ </svg>
27
+ """
28
+
29
class LinkedInJDScraper:
    """Scrape LinkedIn job descriptions via a headless Chrome session.

    Logs in with the supplied credentials, then extracts the description
    text from a job-posting URL. Callers are expected to call close()
    when finished.
    """

    def __init__(self, email, password):
        """Initialize the LinkedIn scraper with credentials"""
        try:
            options = webdriver.ChromeOptions()
            # Hide the automation fingerprint and harden for containers.
            options.add_argument('--disable-blink-features=AutomationControlled')
            options.add_argument('--start-maximized')
            options.add_argument('--no-sandbox')
            options.add_argument('--disable-dev-shm-usage')
            options.add_argument('--headless')
            options.add_argument('--disable-gpu')
            options.add_argument('--remote-debugging-port=9222')

            # Use system ChromeDriver
            service = Service(executable_path='/usr/bin/chromedriver')

            self.driver = webdriver.Chrome(service=service, options=options)
            self.wait = WebDriverWait(self.driver, 10)
            self.email = email
            self.password = password
        except Exception as e:
            raise Exception(f"Failed to initialize Chrome driver: {str(e)}")

    def login(self):
        """Login to LinkedIn.

        Returns True on success; on any failure returns an error string
        (callers check for the substring "failed" rather than catching).
        """
        try:
            self.driver.get("https://www.linkedin.com/login")
            time.sleep(3)  # Give page time to load

            # Clear any existing data
            self.driver.delete_all_cookies()

            # Spoof a regular desktop browser user agent via CDP.
            self.driver.execute_cdp_cmd('Network.setUserAgentOverride', {
                "userAgent": 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/108.0.0.0 Safari/537.36'
            })

            email_field = self.wait.until(
                EC.presence_of_element_located((By.ID, "username"))
            )
            email_field.clear()
            email_field.send_keys(self.email)

            password_field = self.driver.find_element(By.ID, "password")
            password_field.clear()
            password_field.send_keys(self.password)

            # Click via JS to avoid overlay/interception issues.
            login_button = self.driver.find_element(By.CSS_SELECTOR, "button[type='submit']")
            self.driver.execute_script("arguments[0].click();", login_button)

            time.sleep(5)  # Give more time for login to complete

            # Verify login success by checking the post-login URL.
            if "feed" in self.driver.current_url or "mynetwork" in self.driver.current_url:
                return True
            else:
                raise Exception("Login verification failed")

        except Exception as e:
            return f"Login failed: {str(e)}"

    def get_description(self, job_url):
        """Scrape job description from LinkedIn.

        Retries up to three times; re-authenticates if the session was
        redirected back to the login page. Returns the description text,
        or a "Failed to retrieve..." sentinel string on exhaustion.
        """
        max_retries = 3
        retry_count = 0

        while retry_count < max_retries:
            try:
                self.driver.get(job_url)
                time.sleep(5)  # Give more time for the page to load

                # Check if we're still logged in
                if "login" in self.driver.current_url.lower():
                    self.login()
                    self.driver.get(job_url)
                    time.sleep(5)

                # LinkedIn renders the description under several possible
                # containers depending on page variant; try each in turn.
                description_selectors = [
                    "div.jobs-description",
                    "div.jobs-description-content",
                    "div.jobs-box__html-content",
                    "div.show-more-less-html__markup"
                ]

                description_element = None
                for selector in description_selectors:
                    try:
                        description_element = self.wait.until(
                            EC.presence_of_element_located((By.CSS_SELECTOR, selector))
                        )
                        if description_element:
                            break
                    except Exception:
                        continue

                if not description_element:
                    raise NoSuchElementException("Could not find job description element")

                # Try to expand the description if there's a "show more" button
                try:
                    show_more_button = self.driver.find_element(By.CSS_SELECTOR,
                        "button.show-more-less-html__button")
                    if show_more_button.is_displayed():
                        self.driver.execute_script("arguments[0].click();", show_more_button)
                        time.sleep(2)
                except Exception:
                    pass

                # Get the text content
                description = description_element.text

                if description:
                    description = description.strip()

                    # Trim page boilerplate above the first known section marker.
                    markers = ["about the job", "job description", "about this role"]
                    for marker in markers:
                        start_index = description.lower().find(marker)
                        if start_index != -1:
                            description = description[start_index:]
                            break

                    return description

                retry_count += 1
                time.sleep(3)  # Increased wait between retries

            except Exception as e:
                print(f"Attempt {retry_count + 1} failed: {str(e)}")
                retry_count += 1
                time.sleep(3)

        return "Failed to retrieve job description after multiple attempts"

    def close(self):
        """Quit the Chrome driver, ignoring shutdown errors.

        Bug fix: generate_cover_letter() calls scraper.close(), but this
        class previously defined no close() method, so every cleanup path
        raised AttributeError and leaked the headless browser process.
        Safe to call more than once.
        """
        try:
            self.driver.quit()
        except Exception:
            pass
165
+
166
def read_resume(file_obj):
    """Extract the non-empty paragraph text from an uploaded DOCX resume.

    Gradio file objects expose the temp-file path via `.name`; the document
    is parsed from there. Returns the paragraphs joined with newlines, or
    an error string when the file cannot be read.
    """
    try:
        resume_path = file_obj.name  # path of the Gradio-managed temp file

        document = Document(resume_path)
        paragraphs = [
            paragraph.text.strip()
            for paragraph in document.paragraphs
            if paragraph.text.strip()
        ]
        return "\n".join(paragraphs)
    except Exception as e:
        return f"Error reading resume: {str(e)}"
182
+
183
+
184
def generate_cover_letter(linkedin_email, linkedin_password, job_url, resume_file):
    """Generate cover letter based on job description and resume.

    Orchestrates the whole pipeline: scrape the LinkedIn job description,
    read the uploaded DOCX resume, then ask OpenAI to draft the letter.
    All failures are reported as returned strings (Gradio renders them),
    never as raised exceptions.
    """

    # Input validation
    if not all([linkedin_email, linkedin_password, job_url]):
        return "Please fill in all required fields (LinkedIn email, password, and job URL)"

    if not resume_file:
        return "Please upload a resume file"

    try:
        # Initialize scraper and get job description
        scraper = LinkedInJDScraper(linkedin_email, linkedin_password)
        login_result = scraper.login()

        # login() returns True on success or an error string containing "failed".
        if isinstance(login_result, str) and "failed" in login_result.lower():
            # NOTE(review): the LinkedInJDScraper class in this file defines
            # no close() method — confirm, otherwise this raises AttributeError.
            scraper.close()
            return login_result

        # Get job description
        job_description = scraper.get_description(job_url)
        scraper.close()

        # get_description() signals exhaustion with a sentinel string.
        if "Failed to retrieve" in job_description:
            return job_description

        # Read resume
        resume_content = read_resume(resume_file)
        if "Error reading resume" in resume_content:
            return resume_content

        # Prepare prompts
        system_prompt = """
        You are a career assistant specialized in crafting professional and personalized cover letters.
        Your goal is to create compelling, tailored cover letters that align with the job description.
        Each cover letter should emphasize the user's qualifications, skills, and experiences while maintaining a professional tone and structure.
        """

        user_prompt = f"""
        Create a professional cover letter based on:

        Resume content:
        {resume_content}

        Job Description:
        {job_description}

        Make the cover letter specific to the role and highlight relevant experience.
        """

        # Generate cover letter using OpenAI
        api_key = os.getenv('OPENAI_API_KEY')
        if not api_key:
            return "OpenAI API key not found. Please check your .env file."

        client = OpenAI(api_key=api_key)
        response = client.chat.completions.create(
            model="gpt-4",
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
        )
        return response.choices[0].message.content

    except Exception as e:
        return f"Error generating cover letter: {str(e)}"
    finally:
        # Ensure browser is closed even on the exception path. This can run
        # after an explicit close() above, so close() must tolerate a second call.
        if 'scraper' in locals():
            scraper.close()
255
+
256
def create_gradio_interface():
    """Create and configure the Gradio interface.

    Returns the assembled gr.Blocks app; the caller launches it.
    """
    with gr.Blocks(title="LinkedIn Cover Letter Generator") as app:
        # Header with logo and title
        with gr.Row():
            with gr.Column(scale=1):
                gr.HTML(LINKEDIN_LOGO)
            with gr.Column(scale=4):
                gr.Markdown("# LinkedIn Cover Letter Generator")

        with gr.Row():
            # Left column: credentials, job URL, and the resume upload.
            with gr.Column():
                linkedin_email = gr.Textbox(
                    label="LinkedIn Email",
                    placeholder="Enter your LinkedIn email"
                )
                linkedin_password = gr.Textbox(
                    label="LinkedIn Password",
                    type="password",
                    placeholder="Enter your LinkedIn password"
                )
                job_url = gr.Textbox(
                    label="LinkedIn Job URL",
                    placeholder="Paste the LinkedIn job posting URL"
                )
                resume_file = gr.File(
                    label="Upload Resume (DOCX)",
                    file_types=[".docx"]
                )

                generate_button = gr.Button("Generate Cover Letter", variant="primary")

            # Right column: the generated letter rendered as Markdown.
            with gr.Column():
                output = gr.Markdown(label="Generated Cover Letter")

        # Wire the button to the generation pipeline.
        generate_button.click(
            fn=generate_cover_letter,
            inputs=[linkedin_email, linkedin_password, job_url, resume_file],
            outputs=output
        )

    return app
298
+
299
# Main execution
if __name__ == "__main__":
    # Create and launch the Gradio app.
    # NOTE(review): share=True opens a public tunnel URL — confirm intended
    # for this deployment.
    app = create_gradio_interface()
    app.launch(share=True)
cover_letter_generator/.gitattributes ADDED
@@ -0,0 +1,35 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ *.7z filter=lfs diff=lfs merge=lfs -text
2
+ *.arrow filter=lfs diff=lfs merge=lfs -text
3
+ *.bin filter=lfs diff=lfs merge=lfs -text
4
+ *.bz2 filter=lfs diff=lfs merge=lfs -text
5
+ *.ckpt filter=lfs diff=lfs merge=lfs -text
6
+ *.ftz filter=lfs diff=lfs merge=lfs -text
7
+ *.gz filter=lfs diff=lfs merge=lfs -text
8
+ *.h5 filter=lfs diff=lfs merge=lfs -text
9
+ *.joblib filter=lfs diff=lfs merge=lfs -text
10
+ *.lfs.* filter=lfs diff=lfs merge=lfs -text
11
+ *.mlmodel filter=lfs diff=lfs merge=lfs -text
12
+ *.model filter=lfs diff=lfs merge=lfs -text
13
+ *.msgpack filter=lfs diff=lfs merge=lfs -text
14
+ *.npy filter=lfs diff=lfs merge=lfs -text
15
+ *.npz filter=lfs diff=lfs merge=lfs -text
16
+ *.onnx filter=lfs diff=lfs merge=lfs -text
17
+ *.ot filter=lfs diff=lfs merge=lfs -text
18
+ *.parquet filter=lfs diff=lfs merge=lfs -text
19
+ *.pb filter=lfs diff=lfs merge=lfs -text
20
+ *.pickle filter=lfs diff=lfs merge=lfs -text
21
+ *.pkl filter=lfs diff=lfs merge=lfs -text
22
+ *.pt filter=lfs diff=lfs merge=lfs -text
23
+ *.pth filter=lfs diff=lfs merge=lfs -text
24
+ *.rar filter=lfs diff=lfs merge=lfs -text
25
+ *.safetensors filter=lfs diff=lfs merge=lfs -text
26
+ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
27
+ *.tar.* filter=lfs diff=lfs merge=lfs -text
28
+ *.tar filter=lfs diff=lfs merge=lfs -text
29
+ *.tflite filter=lfs diff=lfs merge=lfs -text
30
+ *.tgz filter=lfs diff=lfs merge=lfs -text
31
+ *.wasm filter=lfs diff=lfs merge=lfs -text
32
+ *.xz filter=lfs diff=lfs merge=lfs -text
33
+ *.zip filter=lfs diff=lfs merge=lfs -text
34
+ *.zst filter=lfs diff=lfs merge=lfs -text
35
+ *tfevents* filter=lfs diff=lfs merge=lfs -text
cover_letter_generator/README.md ADDED
@@ -0,0 +1,14 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ ---
2
+ title: Cover Letter Generator
3
+ emoji: 💬
4
+ colorFrom: yellow
5
+ colorTo: purple
6
+ sdk: gradio
7
+ sdk_version: 5.0.1
8
+ app_file: app.py
9
+ pinned: false
10
+ license: mit
11
+ short_description: Create your cover letter for LinkedIn Jobs
12
+ ---
13
+
14
+ An example chatbot using [Gradio](https://gradio.app), [`huggingface_hub`](https://huggingface.co/docs/huggingface_hub/v0.22.2/en/index), and the [Hugging Face Inference API](https://huggingface.co/docs/api-inference/index).
cover_letter_generator/app.py ADDED
@@ -0,0 +1,64 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import gradio as gr
2
+ from huggingface_hub import InferenceClient
3
+
4
+ """
5
+ For more information on `huggingface_hub` Inference API support, please check the docs: https://huggingface.co/docs/huggingface_hub/v0.22.2/en/guides/inference
6
+ """
7
+ client = InferenceClient("HuggingFaceH4/zephyr-7b-beta")
8
+
9
+
10
def respond(
    message,
    history: list[tuple[str, str]],
    system_message,
    max_tokens,
    temperature,
    top_p,
):
    """Stream a chat completion, yielding the accumulated reply so far.

    :param message: Latest user message.
    :param history: Prior (user, assistant) turns from the ChatInterface.
    :param system_message: System prompt prepended to the conversation.
    :param max_tokens: Generation length cap.
    :param temperature: Sampling temperature.
    :param top_p: Nucleus-sampling threshold.

    Bug fixes:
    - Streamed deltas can carry content=None (e.g. the role-only first chunk
      or the final stop chunk); the original did `response += token`
      unconditionally, which raises TypeError on None.
    - The stream loop variable shadowed the `message` parameter; renamed.
    """
    messages = [{"role": "system", "content": system_message}]

    # Replay prior turns into the message list; skip empty slots.
    for val in history:
        if val[0]:
            messages.append({"role": "user", "content": val[0]})
        if val[1]:
            messages.append({"role": "assistant", "content": val[1]})

    messages.append({"role": "user", "content": message})

    response = ""

    for chunk in client.chat_completion(
        messages,
        max_tokens=max_tokens,
        stream=True,
        temperature=temperature,
        top_p=top_p,
    ):
        token = chunk.choices[0].delta.content

        if token:
            response += token
        yield response
41
+
42
+
43
+ """
44
+ For information on how to customize the ChatInterface, peruse the gradio docs: https://www.gradio.app/docs/chatinterface
45
+ """
46
+ demo = gr.ChatInterface(
47
+ respond,
48
+ additional_inputs=[
49
+ gr.Textbox(value="You are a friendly Chatbot.", label="System message"),
50
+ gr.Slider(minimum=1, maximum=2048, value=512, step=1, label="Max new tokens"),
51
+ gr.Slider(minimum=0.1, maximum=4.0, value=0.7, step=0.1, label="Temperature"),
52
+ gr.Slider(
53
+ minimum=0.1,
54
+ maximum=1.0,
55
+ value=0.95,
56
+ step=0.05,
57
+ label="Top-p (nucleus sampling)",
58
+ ),
59
+ ],
60
+ )
61
+
62
+
63
+ if __name__ == "__main__":
64
+ demo.launch()
cover_letter_generator/requirements.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ huggingface_hub==0.25.2
generator.py ADDED
@@ -0,0 +1,86 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ from docx import Document
4
+ from dotenv import load_dotenv
5
+ from openai import OpenAI
6
+
7
class CVGenerator:
    """Generates job-tailored CVs with the OpenAI chat API and saves them as DOCX."""

    def __init__(self):
        # Load API key for OpenAI from the .env file.
        load_dotenv()
        self.api_key = os.getenv("OPENAI_API_KEY")
        if not self.api_key:
            raise ValueError("OpenAI API key not found. Please set it in the .env file.")
        self.model = OpenAI(api_key=self.api_key)

    def generate_cv(self, job_description, user_details):
        """
        Generates a tailored CV based on the job description and user details.

        :param job_description: Text of the job description.
        :param user_details: Dictionary containing user information (name, skills, experience, etc.).
        :return: A formatted CV as a string.
        """
        prompt = f"""
        Create a tailored CV for the following job description and user details:

        Job Description:
        {job_description}

        User Details:
        {user_details}

        The CV should be professional and formatted for ATS (Applicant Tracking Systems). Include the following sections:
        - Contact Information
        - Professional Summary
        - Skills
        - Work Experience
        - Education
        - Certifications (if any)
        - References (if provided)
        """

        # Bug fix: the OpenAI v1 client has no .generate() method and its
        # responses are objects, not dicts, so the original
        # `self.model.generate(...)` / `response.get("choices")` always
        # raised. Use the chat.completions endpoint instead.
        response = self.model.chat.completions.create(
            model="gpt-4",
            max_tokens=1500,
            messages=[{"role": "user", "content": prompt}],
        )
        return response.choices[0].message.content.strip()

    def save_cv(self, cv_text, file_name="Tailored_CV.docx"):
        """
        Saves the CV text to a Word document.

        :param cv_text: The CV content as plain text.
        :param file_name: The name of the file to save the CV.
        """
        doc = Document()
        doc.add_paragraph(cv_text)
        doc.save(file_name)
        print(f"CV saved as {file_name}")
57
+
58
if __name__ == "__main__":
    # Example usage: generate a CV for a sample candidate and save it.
    generator = CVGenerator()

    # Sample job description and user details
    job_description = "We are looking for a data scientist skilled in Python, machine learning, and data visualization."
    user_details = {
        "name": "John Doe",
        "email": "john.doe@example.com",
        "phone": "123-456-7890",
        "skills": ["Python", "Machine Learning", "Data Visualization", "SQL", "R"],
        "experience": [
            {
                "title": "Data Analyst",
                "company": "ABC Corp",
                "duration": "Jan 2020 - Dec 2022",
                "responsibilities": [
                    "Analyzed large datasets to generate actionable insights.",
                    "Developed predictive models to optimize business processes."
                ]
            }
        ],
        "education": "M.S. in Data Science, XYZ University",
        "certifications": ["Certified Data Scientist"],
    }

    # Generate and save the CV
    cv_text = generator.generate_cv(job_description, user_details)
    generator.save_cv(cv_text)
main.py ADDED
@@ -0,0 +1,212 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import requests
3
+ import json
4
+ from typing import List
5
+ from openai import OpenAI
6
+ from dotenv import load_dotenv
7
+ from bs4 import BeautifulSoup
8
+ from IPython.display import Markdown, display, update_display
9
+ from openai import OpenAI
10
+ from selenium import webdriver
11
+ from selenium.webdriver.common.by import By
12
+ from selenium.webdriver.support.ui import WebDriverWait
13
+ from selenium.webdriver.support import expected_conditions as EC
14
+ from selenium.common.exceptions import TimeoutException, NoSuchElementException
15
+ import time
16
+ from docx import Document
17
+
18
# Initialize and constants
load_dotenv(override=True)
api_key = os.getenv('OPENAI_API_KEY')

# Sanity-check the key format (project keys start with 'sk-proj-').
# This only prints a diagnostic; execution continues either way.
if api_key and api_key.startswith('sk-proj-') and len(api_key) > 10:
    print("API key looks good so far")
else:
    print("There might be a problem with API key, Please Check")

# NOTE(review): MODEL_GPT is defined but the API calls later in this file
# pass model names as literals — confirm which is intended.
MODEL_GPT = 'gpt-4o-mini'
openai = OpenAI()  # picks up OPENAI_API_KEY from the environment
29
+
30
class LinkedInJDScraper:
    """Scrapes job descriptions from LinkedIn using a logged-in Chrome session."""

    def __init__(self, email, password):
        # Chrome options: hide the automation fingerprint and avoid
        # sandbox/shared-memory issues.
        options = webdriver.ChromeOptions()
        options.add_argument('--disable-blink-features=AutomationControlled')
        options.add_argument('--start-maximized')
        # Add these options to prevent infinite loops
        options.add_argument('--no-sandbox')
        options.add_argument('--disable-dev-shm-usage')

        self.driver = webdriver.Chrome(options=options)
        self.wait = WebDriverWait(self.driver, 10)  # Reduced wait time
        self.email = email
        self.password = password

    def login(self):
        """Log in to LinkedIn; returns True on (apparent) success, False otherwise."""
        try:
            self.driver.get("https://www.linkedin.com/login")

            # Wait for email field with timeout
            email_field = self.wait.until(
                EC.presence_of_element_located((By.ID, "username"))
            )
            email_field.send_keys(self.email)

            # Find password field
            password_field = self.driver.find_element(By.ID, "password")
            password_field.send_keys(self.password)

            # Find and click login button
            login_button = self.driver.find_element(By.CSS_SELECTOR, "button[type='submit']")
            login_button.click()

            # Wait for login to complete with timeout
            # NOTE(review): success is assumed after a fixed sleep — there is
            # no URL or element check; confirm this is acceptable.
            time.sleep(3)
            return True

        except Exception as e:
            print(f"Login failed: {str(e)}")
            return False

    def get_description(self, job_url):
        """Fetch a job page and return its description text.

        Retries up to three times; returns a sentinel string on exhaustion.
        """
        max_retries = 3
        retry_count = 0

        while retry_count < max_retries:
            try:
                # Navigate to job page
                self.driver.get(job_url)

                # Wait for any description element to be present
                self.wait.until(
                    EC.presence_of_element_located((By.CSS_SELECTOR, "div.jobs-description"))
                )

                # Try to click "Show more" if it exists
                try:
                    show_more = self.wait.until(
                        EC.presence_of_element_located((By.CSS_SELECTOR, "button.show-more-less-html__button"))
                    )
                    if show_more.is_displayed():
                        show_more.click()
                        time.sleep(1)
                except (TimeoutException, NoSuchElementException):
                    pass

                # Get description text
                description_element = self.driver.find_element(By.CSS_SELECTOR, "div.jobs-description")
                description = description_element.text

                if description:
                    # Trim page boilerplate above the "about the job" marker.
                    start_index = description.lower().find("about the job")
                    if start_index != -1:
                        return description[start_index:]
                    return description

                retry_count += 1
                time.sleep(2)

            except Exception as e:
                print(f"Attempt {retry_count + 1} failed: {str(e)}")
                retry_count += 1
                time.sleep(2)

        return "Failed to retrieve job description after multiple attempts"

    def close(self):
        """Quit the browser, ignoring errors from an already-dead driver."""
        try:
            self.driver.quit()
        except:
            pass
121
+
122
if __name__ == "__main__":
    # Credentials come from the environment (.env loaded above).
    EMAIL = os.getenv('LINKEDIN_EMAIL')
    PASSWORD = os.getenv('LINKEDIN_PASSWORD')

    scraper = LinkedInJDScraper(EMAIL, PASSWORD)

    try:
        if scraper.login():
            # Hard-coded demo job posting.
            job_url = "https://www.linkedin.com/jobs/view/4109295398"
            description = scraper.get_description(job_url)
            print("\nJob Description:")
            print(description)
        else:
            print("Failed to login. Please check your credentials.")
    finally:
        # Always release the browser, even if scraping failed.
        scraper.close()
138
+
139
# System prompt shared by the cover-letter generation calls below.
system_prompt = """
You are a career assistant specialized in crafting professional and personalized cover letters.
Your goal is to create compelling, tailored cover letters that align with the job description.
Each cover letter should emphasize the user’s qualifications, skills, and experiences while maintaining a professional tone and structure.
Ensure the letter adheres to the following format:

1. **Introduction**: A brief and enthusiastic introduction expressing interest in the role and organization.
2. **Body**: Highlight relevant skills, experiences, and achievements that align with the job description. Use specific examples when possible.
3. **Closing**: Reiterate enthusiasm for the role, express willingness to contribute to the organization, and include a polite call to action.

Maintain clarity, professionalism, and conciseness while tailoring the letter. Don't add anything like as advertised.
"""
+
152
+
153
+ def read_text_from_word(file_path):
154
+ """Extracts and returns all text from a Word document."""
155
+ # Load the document
156
+ doc = Document(file_path)
157
+
158
+ # Extract text from each paragraph
159
+ text = []
160
+ for paragraph in doc.paragraphs:
161
+ if paragraph.text.strip(): # Skip empty lines
162
+ text.append(paragraph.text.strip())
163
+
164
+ return "\n".join(text)
165
+
166
+ file_path = "Sample_Resume.docx" # Replace with your file path
167
+ resume_skills = read_text_from_word(file_path)
168
+
169
+ print(resume_skills)
170
+
171
+ def get_cl_user_prompt_with_scraped_jd(job_url, scraper, resume_skills):
172
+ # Scrape job description
173
+ description = scraper.scrape_job_descriptions([job_url]).get(job_url)
174
+
175
+ # Construct the prompt
176
+ user_prompt = "You are tasked with creating a professional and tailored Cover Letter for a job application.\n"
177
+ user_prompt += "Here is a list of skills and experiences from the candidate's resume:\n"
178
+ user_prompt += f"{resume_skills}\n\n"
179
+ user_prompt += "Here is the job description for the position they are applying for:\n"
180
+ user_prompt += f"{description}\n\n"
181
+ user_prompt += (
182
+ "Using the skills from the candidate's resume, craft a CL that highlights their most relevant qualifications "
183
+ "and experiences for this job making use of job description. "
184
+ "Ensure the CV follows a professional format and aligns with the role requirements. Present the CV in markdown format.\n"
185
+ )
186
+ return user_prompt
187
+
188
def create_jd(system_prompt, file_path, job_url, scraper, model="gpt-4"):
    """Generate a cover letter and render it as Markdown in the notebook.

    :param system_prompt: System instructions for the model.
    :param file_path: Path to the candidate's DOCX resume.
    :param job_url: LinkedIn job posting URL to scrape.
    :param scraper: A logged-in LinkedInJDScraper instance.
    :param model: OpenAI model name.

    Bug fixes:
    - The original forwarded (file_path, job_url, scraper) into
      get_cl_user_prompt_with_scraped_jd(job_url, scraper, resume_skills),
      scrambling every argument. The resume text is now read from file_path
      and the arguments are passed in the declared order.
    - openai.ChatCompletion.create was removed in openai>=1.0; use
      openai.chat.completions.create (as the module-level call below
      already does).
    """
    try:
        # Extract the resume text used to tailor the letter.
        resume_skills = read_text_from_word(file_path)

        # Generate user prompt
        user_prompt = get_cl_user_prompt_with_scraped_jd(job_url, scraper, resume_skills)

        # OpenAI API call
        completion = openai.chat.completions.create(
            model=model,
            messages=[
                {"role": "system", "content": system_prompt},
                {"role": "user", "content": user_prompt}
            ]
        )

        # Extract and display the result
        result = completion.choices[0].message.content
        display(Markdown(result))

    except Exception as e:
        print(f"An error occurred: {e}")
+
209
+ messages=[{"role": "system", "content": system_prompt}, {"role": "user", "content": get_cl_user_prompt_with_scraped_jd(file_path, job_url, scraper) }]
210
+ response = openai.chat.completions.create(model="gpt-4o-mini", messages=messages)
211
+ print(response.choices[0].message.content)
212
+
packages.txt ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ chromium
2
+ chromium-driver
project.ipynb ADDED
@@ -0,0 +1,468 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "cells": [
3
+ {
4
+ "cell_type": "code",
5
+ "execution_count": 4,
6
+ "metadata": {},
7
+ "outputs": [],
8
+ "source": [
9
+ "import os \n",
10
+ "import requests \n",
11
+ "import json \n",
12
+ "from typing import List \n",
13
+ "from dotenv import load_dotenv\n",
14
+ "from bs4 import BeautifulSoup \n",
15
+ "from IPython.display import Markdown, display, update_display \n",
16
+ "from openai import OpenAI\n",
17
+ "from selenium import webdriver\n",
18
+ "from selenium.webdriver.common.by import By\n",
19
+ "from selenium.webdriver.support.ui import WebDriverWait\n",
20
+ "from selenium.webdriver.support import expected_conditions as EC\n",
21
+ "from selenium.common.exceptions import TimeoutException, NoSuchElementException\n",
22
+ "import time"
23
+ ]
24
+ },
25
+ {
26
+ "cell_type": "code",
27
+ "execution_count": 5,
28
+ "metadata": {},
29
+ "outputs": [
30
+ {
31
+ "name": "stdout",
32
+ "output_type": "stream",
33
+ "text": [
34
+ "API key looks good so far\n"
35
+ ]
36
+ }
37
+ ],
38
+ "source": [
39
+ "# Initialize and constants\n",
40
+ "load_dotenv(override=True)\n",
41
+ "api_key = os.getenv('OPENAI_API_KEY')\n",
42
+ "\n",
43
+ "if api_key and api_key.startswith('sk-proj-') and len(api_key) > 10: \n",
44
+ " print(\"API key looks good so far\") \n",
45
+ "else: \n",
46
+ " print(\"There might be a problem with API key, Please Check\") \n",
47
+ "\n",
48
+ "MODEL_GPT = 'gpt-4o-mini'\n",
49
+ "openai = OpenAI()"
50
+ ]
51
+ },
52
+ {
53
+ "cell_type": "code",
54
+ "execution_count": 11,
55
+ "metadata": {},
56
+ "outputs": [
57
+ {
58
+ "name": "stdout",
59
+ "output_type": "stream",
60
+ "text": [
61
+ "\n",
62
+ "Job Description:\n",
63
+ "About the job\n",
64
+ "Description\n",
65
+ "\n",
66
+ "Are you looking to work at the forefront of Machine Learning and AI? Would you be excited to apply cutting edge Generative AI algorithms to solve real world problems with significant impact? The AWS Industries Team at AWS helps AWS customers implement Generative AI solutions and realize transformational business opportunities for AWS customers in the most strategic industry verticals. This is a team of data scientists, engineers, and architects working step-by-step with customers to build bespoke solutions that harness the power of generative AI.\n",
67
+ "\n",
68
+ "The team helps customers imagine and scope the use cases that will create the greatest value for their businesses, select and train and fine tune the right models, define paths to navigate technical or business challenges, develop proof-of-concepts, and build applications to launch these solutions at scale. The AWS Industries team provides guidance and implements best practices for applying generative AI responsibly and cost efficiently.\n",
69
+ "\n",
70
+ "You will work directly with customers and innovate in a fast-paced organization that contributes to game-changing projects and technologies. You will design and run experiments, research new algorithms, and find new ways of optimizing risk, profitability, and customer experience.\n",
71
+ "\n",
72
+ "In this Data Scientist role you will be capable of using GenAI and other techniques to design, evangelize, and implement and scale cutting-edge solutions for never-before-solved problems.\n",
73
+ "\n",
74
+ "Key job responsibilities\n",
75
+ "\n",
76
+ "As a Data Scientist, you will-\n",
77
+ "\n",
78
+ "\n",
79
+ "\n",
80
+ "\n",
81
+ "\n",
82
+ "ABOUT AWS:\n",
83
+ "\n",
84
+ "Diverse Experiences\n",
85
+ "\n",
86
+ "Amazon values diverse experiences. Even if you do not meet all of the preferred qualifications and skills listed in the job description, we encourage candidates to apply. If your career is just starting, hasn’t followed a traditional path, or includes alternative experiences, don’t let it stop you from applying.\n",
87
+ "\n",
88
+ "Why AWS\n",
89
+ "\n",
90
+ "Amazon Web Services (AWS) is the world’s most comprehensive and broadly adopted cloud platform. We pioneered cloud computing and never stopped innovating — that’s why customers from the most successful startups to Global 500 companies trust our robust suite of products and services to power their businesses.\n",
91
+ "\n",
92
+ "Work/Life Balance\n",
93
+ "\n",
94
+ "We value work-life harmony. Achieving success at work should never come at the expense of sacrifices at home, which is why we strive for flexibility as part of our working culture. When we feel supported in the workplace and at home, there’s nothing we can’t achieve in the cloud.\n",
95
+ "\n",
96
+ "Inclusive Team Culture\n",
97
+ "\n",
98
+ "Here at AWS, it’s in our nature to learn and be curious. Our employee-led affinity groups foster a culture of inclusion that empower us to be proud of our differences. Ongoing events and learning experiences, including our Conversations on Race and Ethnicity (CORE) and AmazeCon (gender diversity) conferences, inspire us to never stop embracing our uniqueness.\n",
99
+ "\n",
100
+ "Mentorship and Career Growth\n",
101
+ "\n",
102
+ "We’re continuously raising our performance bar as we strive to become Earth’s Best Employer. That’s why you’ll find endless knowledge-sharing, mentorship and other career-advancing resources here to help you develop into a better-rounded professional.\n",
103
+ "\n",
104
+ "\n",
105
+ "\n",
106
+ "\n",
107
+ "\n",
108
+ "\n",
109
+ "\n",
110
+ "\n",
111
+ "\n",
112
+ "Amazon is committed to a diverse and inclusive workplace. Amazon is an equal opportunity employer and does not discriminate on the basis of race, national origin, gender, gender identity, sexual orientation, protected veteran status, disability, age, or other legally protected status. For individuals with disabilities who would like to request an accommodation, please visit https://www.amazon.jobs/en/disability/us.\n",
113
+ "\n",
114
+ "Los Angeles County applicants: Job duties for this position include: work safely and cooperatively with other employees, supervisors, and staff; adhere to standards of excellence despite stressful conditions; communicate effectively and respectfully with employees, supervisors, and staff to ensure exceptional customer service; and follow all federal, state, and local laws and Company policies. Criminal history may have a direct, adverse, and negative relationship with some of the material job duties of this position. These include the duties and responsibilities listed above, as well as the abilities to adhere to company policies, exercise sound judgment, effectively manage stress and work safely and respectfully with others, exhibit trustworthiness and professionalism, and safeguard business operations and the Company’s reputation. Pursuant to the Los Angeles County Fair Chance Ordinance, we will consider for employment qualified applicants with arrest and conviction records.\n",
115
+ "\n",
116
+ "Our compensation reflects the cost of labor across several US geographic markets. The base pay for this position ranges from $125,500/year in our lowest geographic market up to $212,800/year in our highest geographic market. Pay is based on a number of factors including market location and may vary depending on job-related knowledge, skills, and experience. Amazon is a total compensation company. Dependent on the position offered, equity, sign-on payments, and other forms of compensation may be provided as part of a total compensation package, in addition to a full range of medical, financial, and/or other benefits. For more information, please visit https://www.aboutamazon.com/workplace/employee-benefits. This position will remain posted until filled. Applicants should apply via our internal or external career site.\n",
117
+ "\n",
118
+ "\n",
119
+ "- Amazon Web Services, Inc.\n",
120
+ "\n",
121
+ "Job ID: A2800210\n",
122
+ "See more\n"
123
+ ]
124
+ }
125
+ ],
126
+ "source": [
127
+ "class LinkedInJDScraper:\n",
128
+ " def __init__(self, email, password):\n",
129
+ " options = webdriver.ChromeOptions()\n",
130
+ " options.add_argument('--disable-blink-features=AutomationControlled')\n",
131
+ " options.add_argument('--start-maximized')\n",
132
+ " # Add these options to prevent infinite loops\n",
133
+ " options.add_argument('--no-sandbox')\n",
134
+ " options.add_argument('--disable-dev-shm-usage')\n",
135
+ " \n",
136
+ " self.driver = webdriver.Chrome(options=options)\n",
137
+ " self.wait = WebDriverWait(self.driver, 10) # Reduced wait time\n",
138
+ " self.email = email\n",
139
+ " self.password = password\n",
140
+ " \n",
141
+ " def login(self):\n",
142
+ " try:\n",
143
+ " self.driver.get(\"https://www.linkedin.com/login\")\n",
144
+ " \n",
145
+ " # Wait for email field with timeout\n",
146
+ " email_field = self.wait.until(\n",
147
+ " EC.presence_of_element_located((By.ID, \"username\"))\n",
148
+ " )\n",
149
+ " email_field.send_keys(self.email)\n",
150
+ " \n",
151
+ " # Find password field\n",
152
+ " password_field = self.driver.find_element(By.ID, \"password\")\n",
153
+ " password_field.send_keys(self.password)\n",
154
+ " \n",
155
+ " # Find and click login button\n",
156
+ " login_button = self.driver.find_element(By.CSS_SELECTOR, \"button[type='submit']\")\n",
157
+ " login_button.click()\n",
158
+ " \n",
159
+ " # Wait for login to complete with timeout\n",
160
+ " time.sleep(3)\n",
161
+ " return True\n",
162
+ " \n",
163
+ " except Exception as e:\n",
164
+ " print(f\"Login failed: {str(e)}\")\n",
165
+ " return False\n",
166
+ " \n",
167
+ " def get_description(self, job_url):\n",
168
+ " max_retries = 3\n",
169
+ " retry_count = 0\n",
170
+ " \n",
171
+ " while retry_count < max_retries:\n",
172
+ " try:\n",
173
+ " # Navigate to job page\n",
174
+ " self.driver.get(job_url)\n",
175
+ " \n",
176
+ " # Wait for any description element to be present\n",
177
+ " self.wait.until(\n",
178
+ " EC.presence_of_element_located((By.CSS_SELECTOR, \"div.jobs-description\"))\n",
179
+ " )\n",
180
+ " \n",
181
+ " # Try to click \"Show more\" if it exists\n",
182
+ " try:\n",
183
+ " show_more = self.wait.until(\n",
184
+ " EC.presence_of_element_located((By.CSS_SELECTOR, \"button.show-more-less-html__button\"))\n",
185
+ " )\n",
186
+ " if show_more.is_displayed():\n",
187
+ " show_more.click()\n",
188
+ " time.sleep(1)\n",
189
+ " except (TimeoutException, NoSuchElementException):\n",
190
+ " pass\n",
191
+ " \n",
192
+ " # Get description text\n",
193
+ " description_element = self.driver.find_element(By.CSS_SELECTOR, \"div.jobs-description\")\n",
194
+ " description = description_element.text\n",
195
+ " \n",
196
+ " if description:\n",
197
+ " # Find start of job description\n",
198
+ " start_index = description.lower().find(\"about the job\")\n",
199
+ " if start_index != -1:\n",
200
+ " return description[start_index:]\n",
201
+ " return description\n",
202
+ " \n",
203
+ " retry_count += 1\n",
204
+ " time.sleep(2)\n",
205
+ " \n",
206
+ " except Exception as e:\n",
207
+ " print(f\"Attempt {retry_count + 1} failed: {str(e)}\")\n",
208
+ " retry_count += 1\n",
209
+ " time.sleep(2)\n",
210
+ " \n",
211
+ " return \"Failed to retrieve job description after multiple attempts\"\n",
212
+ " \n",
213
+ " def close(self):\n",
214
+ " try:\n",
215
+ " self.driver.quit()\n",
216
+ " except:\n",
217
+ " pass\n",
218
+ "\n",
219
+ "if __name__ == \"__main__\":\n",
220
+ " EMAIL = os.getenv('LINKEDIN_EMAIL')\n",
221
+ " PASSWORD = os.getenv('LINKEDIN_PASSWORD')\n",
222
+ " \n",
223
+ " scraper = LinkedInJDScraper(EMAIL, PASSWORD)\n",
224
+ " \n",
225
+ " try:\n",
226
+ " if scraper.login():\n",
227
+ " job_url = \"https://www.linkedin.com/jobs/view/4109295398\"\n",
228
+ " description = scraper.get_description(job_url)\n",
229
+ " print(\"\\nJob Description:\")\n",
230
+ " print(description)\n",
231
+ " else:\n",
232
+ " print(\"Failed to login. Please check your credentials.\")\n",
233
+ " finally:\n",
234
+ " scraper.close()"
235
+ ]
236
+ },
237
+ {
238
+ "cell_type": "code",
239
+ "execution_count": 7,
240
+ "metadata": {},
241
+ "outputs": [],
242
+ "source": [
243
+ "system_prompt = \"\"\"\n",
244
+ "You are a career assistant specialized in crafting professional and personalized cover letters. \n",
245
+ "Your goal is to create compelling, tailored cover letters that align with the job description. \n",
246
+ "Each cover letter should emphasize the user’s qualifications, skills, and experiences while maintaining a professional tone and structure. \n",
247
+ "Ensure the letter adheres to the following format:\n",
248
+ "\n",
249
+ "1. **Introduction**: A brief and enthusiastic introduction expressing interest in the role and organization.\n",
250
+ "2. **Body**: Highlight relevant skills, experiences, and achievements that align with the job description. Use specific examples when possible.\n",
251
+ "3. **Closing**: Reiterate enthusiasm for the role, express willingness to contribute to the organization, and include a polite call to action.\n",
252
+ "\n",
253
+ "Maintain clarity, professionalism, and conciseness while tailoring the letter. Don't add anything like as advertised.\n",
254
+ "\"\"\""
255
+ ]
256
+ },
257
+ {
258
+ "cell_type": "code",
259
+ "execution_count": 12,
260
+ "metadata": {},
261
+ "outputs": [
262
+ {
263
+ "name": "stdout",
264
+ "output_type": "stream",
265
+ "text": [
266
+ "John Doe\n",
267
+ "john.doe@example.com | +1 123-456-7890 | linkedin.com/in/johndoe\n",
268
+ "EDUCATION\n",
269
+ "•\tMaster of Science in Computer Science\n",
270
+ "Stanford University, Stanford, CA\n",
271
+ "September 2016 – June 2018\n",
272
+ "GPA: 3.9/4.0\n",
273
+ "•\tBachelor of Science in Statistics\n",
274
+ "University of California, Berkeley, CA\n",
275
+ "August 2012 – May 2016\n",
276
+ "GPA: 3.8/4.0\n",
277
+ "PROFESSIONAL EXPERIENCE\n",
278
+ "•\tData Scientist\n",
279
+ "Meta Platforms, Inc., Menlo Park, CA\n",
280
+ "July 2018 – Present\n",
281
+ "•\tDeveloped and deployed machine learning models to enhance user engagement, resulting in a 15% increase in active user retention over 12 months.\n",
282
+ "•\tLed a cross-functional team to implement a recommendation system for personalized content delivery, boosting click-through rates by 20%.\n",
283
+ "•\tConducted A/B testing and statistical analysis to inform product decisions, contributing to a 10% improvement in ad revenue.\n",
284
+ "•\tOptimized data pipelines using SQL and Python, reducing data processing time by 30%.\n",
285
+ "•\tMentored junior data scientists and conducted training sessions on advanced analytics techniques.\n",
286
+ "PROJECTS\n",
287
+ "•\tReal-Time Fraud Detection System\n",
288
+ "Designed and implemented a real-time fraud detection system using anomaly detection algorithms, reducing fraudulent activities by 25%.\n",
289
+ "•\tCustomer Segmentation Analysis\n",
290
+ "Performed clustering analysis to segment customers, enabling targeted marketing strategies that increased conversion rates by 18%.\n",
291
+ "•\tNatural Language Processing for Sentiment Analysis\n",
292
+ "Developed an NLP model to analyze customer feedback, providing insights that improved customer satisfaction scores by 12%.\n",
293
+ "SKILLS\n",
294
+ "•\tProgramming Languages: Python, R, SQL, Java\n",
295
+ "•\tMachine Learning Frameworks: TensorFlow, PyTorch, Scikit-learn\n",
296
+ "•\tData Visualization Tools: Tableau, Matplotlib, Seaborn\n",
297
+ "•\tBig Data Technologies: Hadoop, Spark\n",
298
+ "•\tDatabases: MySQL, PostgreSQL, MongoDB\n",
299
+ "•\tTools & Platforms: Git, Docker, AWS, Azure\n",
300
+ "CERTIFICATIONS\n",
301
+ "•\tCertified Data Scientist\n",
302
+ "Data Science Council of America (DASCA)\n",
303
+ "Issued June 2018\n",
304
+ "•\tMachine Learning Specialization\n",
305
+ "Coursera – Stanford University\n",
306
+ "Completed May 2017\n",
307
+ "ACHIEVEMENTS\n",
308
+ "•\tRecognized as “Employee of the Quarter” for Q2 2020 at Meta for outstanding contributions to the data science team.\n",
309
+ "•\tPublished research on machine learning algorithms in the Journal of Data Science (March 2019).\n",
310
+ "•\tSpeaker at the 2021 International Conference on Data Mining, presenting on scalable data pipeline architectures.\n",
311
+ "If you need this content in a DOCX file, please let me know, and I can provide a download link for you.\n"
312
+ ]
313
+ }
314
+ ],
315
+ "source": [
316
+ "from docx import Document\n",
317
+ "\n",
318
+ "def read_text_from_word(file_path):\n",
319
+ " \"\"\"Extracts and returns all text from a Word document.\"\"\"\n",
320
+ " # Load the document\n",
321
+ " doc = Document(file_path)\n",
322
+ " \n",
323
+ " # Extract text from each paragraph\n",
324
+ " text = []\n",
325
+ " for paragraph in doc.paragraphs:\n",
326
+ " if paragraph.text.strip(): # Skip empty lines\n",
327
+ " text.append(paragraph.text.strip())\n",
328
+ " \n",
329
+ " return \"\\n\".join(text)\n",
330
+ "\n",
331
+ "# Example Usage\n",
332
+ "file_path = \"Sample_Resume.docx\" # Replace with your file path\n",
333
+ "resume_skills = read_text_from_word(file_path)\n",
334
+ "\n",
335
+ "print(resume_skills)"
336
+ ]
337
+ },
338
+ {
339
+ "cell_type": "code",
340
+ "execution_count": 13,
341
+ "metadata": {},
342
+ "outputs": [],
343
+ "source": [
344
+ "def get_cl_user_prompt_with_scraped_jd(file_path, job_url, scraper):\n",
345
+ " \"\"\"\n",
346
+ " Generate a user prompt for CL creation using scraped job description \n",
347
+ " and skills from a Word document.\n",
348
+ " \"\"\"\n",
349
+ "\n",
350
+ " # # Scrape the job description\n",
351
+ " # job_description = scraper.get_description(job_url)\n",
352
+ " \n",
353
+ " # Create the user prompt\n",
354
+ " user_prompt = \"You are tasked with creating a professional and tailored Cover Letter for a job application.\\n\"\n",
355
+ " user_prompt += \"Here is a list of skills and experiences from the candidate's resume:\\n\"\n",
356
+ " user_prompt += f\"{resume_skills}\\n\\n\"\n",
357
+ " user_prompt += \"Here is the job description for the position they are applying for:\\n\"\n",
358
+ " user_prompt += f\"{description}\\n\\n\"\n",
359
+ " user_prompt += \"Using the skills from the candidate's resume, craft a CL that highlights their most relevant qualifications and experiences for this job making use of job description. \"\n",
360
+ " user_prompt += \"Ensure the CV follows a professional format and aligns with the role requirements. Present the CV in markdown format.\\n\"\n",
361
+ " user_prompt = user_prompt \n",
362
+ " return user_prompt"
363
+ ]
364
+ },
365
+ {
366
+ "cell_type": "code",
367
+ "execution_count": 14,
368
+ "metadata": {},
369
+ "outputs": [],
370
+ "source": [
371
+ "from IPython.display import Markdown, display\n",
372
+ "\n",
373
+ "def create_jd(system_prompt, file_path, job_url, scraper, model=\"gpt-4\"):\n",
374
+ "\n",
375
+ " # OpenAI API call\n",
376
+ " completion = openai.ChatCompletion.create(\n",
377
+ " model=model,\n",
378
+ " messages=[\n",
379
+ " {\"role\": \"system\", \"content\": system_prompt},\n",
380
+ " {\"role\": \"user\", \"content\": get_cl_user_prompt_with_scraped_jd(file_path, job_url, scraper)}\n",
381
+ " ]\n",
382
+ " )\n",
383
+ "\n",
384
+ " # Extract and display the result\n",
385
+ " result = completion.choices[0].message.content\n",
386
+ " display(Markdown(result))"
387
+ ]
388
+ },
389
+ {
390
+ "cell_type": "code",
391
+ "execution_count": 15,
392
+ "metadata": {},
393
+ "outputs": [],
394
+ "source": [
395
+ "messages=[{\"role\": \"system\", \"content\": system_prompt}, {\"role\": \"user\", \"content\": get_cl_user_prompt_with_scraped_jd(file_path, job_url, scraper) }]"
396
+ ]
397
+ },
398
+ {
399
+ "cell_type": "code",
400
+ "execution_count": 16,
401
+ "metadata": {},
402
+ "outputs": [
403
+ {
404
+ "name": "stdout",
405
+ "output_type": "stream",
406
+ "text": [
407
+ "```markdown\n",
408
+ "John Doe \n",
409
+ "john.doe@example.com | +1 123-456-7890 | linkedin.com/in/johndoe \n",
410
+ "\n",
411
+ "[Date] \n",
412
+ "\n",
413
+ "Hiring Manager \n",
414
+ "Amazon Web Services, Inc. \n",
415
+ "[Company Address] \n",
416
+ "[City, State, Zip] \n",
417
+ "\n",
418
+ "Dear Hiring Manager,\n",
419
+ "\n",
420
+ "I am writing to express my enthusiasm for the Data Scientist position within the AWS Industries Team, as advertised. With a strong foundation in machine learning, data analytics, and a proven track record of developing innovative solutions to enhance user engagement, I am excited about the opportunity to contribute to cutting-edge projects at Amazon Web Services.\n",
421
+ "\n",
422
+ "During my time as a Data Scientist at Meta Platforms, Inc., I developed and deployed machine learning models that significantly increased active user retention by 15%. Leading a cross-functional team to implement a personalized content recommendation system, I contributed to a 20% boost in click-through rates, showcasing my ability to drive substantial improvements through the application of advanced analytics. My experience in conducting A/B testing and statistical analysis allowed me to make informed product decisions, resulting in a 10% increase in ad revenue—a key aspect that aligns with AWS's commitment to optimizing customer experience and profitability.\n",
423
+ "\n",
424
+ "One of my most impactful projects involved designing a real-time fraud detection system using anomaly detection algorithms, reducing fraudulent activities by 25%. This aligns with the responsibilities of the AWS Industries Team to develop bespoke solutions that tackle complex problems effectively. Additionally, my clustering analysis for customer segmentation led to targeted marketing strategies that increased conversion rates by 18%, demonstrating my capacity to innovate and tailor solutions to specific business needs.\n",
425
+ "\n",
426
+ "I am particularly drawn to the emphasis on generative AI within the AWS Industries Team. My background in programming languages such as Python, R, and SQL, combined with experience using machine learning frameworks like TensorFlow and PyTorch, prepares me well for designing and implementing generative AI solutions. Furthermore, my capability to optimize data pipelines using tools like Hadoop and Spark ensures that I can provide efficient and scalable solutions for AWS clients.\n",
427
+ "\n",
428
+ "The opportunity to work with diverse teams, guiding clients through the implementation of generative AI solutions, excites me greatly. I am eager to leverage my skills to help AWS customers realize transformational business opportunities while adhering to the principles of responsible and cost-efficient implementation.\n",
429
+ "\n",
430
+ "Thank you for considering my application. I am excited about the possibility of contributing to the innovative work at AWS and would welcome the opportunity to discuss my application further. I look forward to the chance to speak with you soon.\n",
431
+ "\n",
432
+ "Warm regards,\n",
433
+ "\n",
434
+ "John Doe\n",
435
+ "```\n"
436
+ ]
437
+ }
438
+ ],
439
+ "source": [
440
+ "# To give you a preview -- calling OpenAI with system and user messages:\n",
441
+ "\n",
442
+ "response = openai.chat.completions.create(model=\"gpt-4o-mini\", messages=messages)\n",
443
+ "print(response.choices[0].message.content)"
444
+ ]
445
+ }
446
+ ],
447
+ "metadata": {
448
+ "kernelspec": {
449
+ "display_name": "Python 3",
450
+ "language": "python",
451
+ "name": "python3"
452
+ },
453
+ "language_info": {
454
+ "codemirror_mode": {
455
+ "name": "ipython",
456
+ "version": 3
457
+ },
458
+ "file_extension": ".py",
459
+ "mimetype": "text/x-python",
460
+ "name": "python",
461
+ "nbconvert_exporter": "python",
462
+ "pygments_lexer": "ipython3",
463
+ "version": "3.12.7"
464
+ }
465
+ },
466
+ "nbformat": 4,
467
+ "nbformat_minor": 2
468
+ }
requirements.txt ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ requests==2.32.3
2
+ openai==1.57.4
3
+ python-docx==1.1.2
4
+ python-dotenv==1.0.1
5
+ beautifulsoup4==4.12.3
6
+ selenium==4.27.1
7
+ gradio==3.50
8
+ webdriver_manager==4.0.1
resume_reader.py ADDED
@@ -0,0 +1,19 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from docx import Document
2
+
3
def read_text_from_word(file_path):
    """Return the non-empty paragraph text of a Word document, newline-joined."""
    document = Document(file_path)

    # Keep only paragraphs with visible content, stripped of surrounding
    # whitespace, preserving document order.
    lines = [para.text.strip() for para in document.paragraphs if para.text.strip()]

    return "\n".join(lines)
15
+
16
# Example usage: extract and print the resume text when run as a script.
file_path = "Sample_Resume.docx" # Replace with your file path
resume_skills = read_text_from_word(file_path)

print(resume_skills)
scraper.py ADDED
@@ -0,0 +1,66 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+
2
+ import os
3
+ from selenium import webdriver
4
+ from selenium.webdriver.common.by import By
5
+ from selenium.webdriver.support.ui import WebDriverWait
6
+ from selenium.webdriver.support import expected_conditions as EC
7
+ from selenium.common.exceptions import TimeoutException, NoSuchElementException
8
+ from bs4 import BeautifulSoup
9
+ from dotenv import load_dotenv
10
+
11
class LinkedInJDScraper:
    """Headless-Chrome scraper for LinkedIn job description pages."""

    def __init__(self, email, password):
        """Start a headless Chrome driver and remember the login credentials."""
        options = webdriver.ChromeOptions()
        options.add_argument("--headless")
        self.driver = webdriver.Chrome(options=options)
        self.email = email
        self.password = password

    def login(self):
        """Log in to LinkedIn with the stored credentials (best effort)."""
        try:
            self.driver.get("https://www.linkedin.com/login")
            WebDriverWait(self.driver, 10).until(EC.presence_of_element_located((By.ID, "username")))
            self.driver.find_element(By.ID, "username").send_keys(self.email)
            self.driver.find_element(By.ID, "password").send_keys(self.password)
            self.driver.find_element(By.XPATH, '//button[@type="submit"]').click()
        except TimeoutException:
            print("Login page did not load in time.")

    def scrape_job_descriptions(self, job_urls):
        """Scrape each URL and return {url: description text or None on failure}."""
        job_descriptions = {}
        for url in job_urls:
            try:
                self.driver.get(url)
                WebDriverWait(self.driver, 10).until(
                    EC.presence_of_element_located((By.CLASS_NAME, "jobs-description-content"))
                )
                page_source = self.driver.page_source
                soup = BeautifulSoup(page_source, "html.parser")
                container = soup.find("div", class_="jobs-description-content")
                # BUG FIX: soup.find may return None even after the wait
                # (page re-rendered, markup changed); the old code then raised
                # an AttributeError on .get_text that the except clause below
                # does not catch. Record None instead, matching the error path.
                job_descriptions[url] = container.get_text(strip=True) if container else None
            except (TimeoutException, NoSuchElementException) as e:
                print(f"Error scraping {url}: {e}")
                job_descriptions[url] = None
        return job_descriptions

    def close(self):
        """Shut down the WebDriver."""
        self.driver.quit()
52
+
53
if __name__ == "__main__":
    # Credentials come from the environment (.env) rather than source code.
    load_dotenv()
    linkedin_email = os.getenv("LINKEDIN_EMAIL")
    linkedin_password = os.getenv("LINKEDIN_PASSWORD")

    # Example usage
    jd_scraper = LinkedInJDScraper(linkedin_email, linkedin_password)
    jd_scraper.login()
    target_urls = [
        "https://www.linkedin.com/jobs/view/4123644034",  # Add your LinkedIn job ID only here
    ]
    print(jd_scraper.scrape_job_descriptions(target_urls))
    jd_scraper.close()
system_prompt.py ADDED
@@ -0,0 +1,12 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
1
# Shared system prompt: constrains the model to a three-part
# (introduction / body / closing) professional cover-letter format.
system_prompt = """
You are a career assistant specialized in crafting professional and personalized cover letters.
Your goal is to create compelling, tailored cover letters that align with the job description.
Each cover letter should emphasize the user’s qualifications, skills, and experiences while maintaining a professional tone and structure.
Ensure the letter adheres to the following format:

1. **Introduction**: A brief and enthusiastic introduction expressing interest in the role and organization.
2. **Body**: Highlight relevant skills, experiences, and achievements that align with the job description. Use specific examples when possible.
3. **Closing**: Reiterate enthusiasm for the role, express willingness to contribute to the organization, and include a polite call to action.

Maintain clarity, professionalism, and conciseness while tailoring the letter. Don't add anything like as advertised.
"""
user_prompt.py ADDED
@@ -0,0 +1,16 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
def get_cl_user_prompt_with_scraped_jd(job_url, scraper, resume_skills):
    """Build the cover-letter user prompt from resume skills and a scraped JD.

    Args:
        job_url: LinkedIn job posting URL to scrape.
        scraper: Object exposing scrape_job_descriptions(urls) -> {url: text}.
        resume_skills: Plain-text skills/experience extracted from the resume.

    Raises:
        ValueError: if the job description could not be retrieved.
    """
    # Scrape job description
    description = scraper.scrape_job_descriptions([job_url]).get(job_url)

    # BUG FIX: the scraper stores None for failed URLs; the original silently
    # embedded the literal string "None" as the job description. Fail fast so
    # the caller sees the scraping problem instead of a garbage prompt.
    if not description:
        raise ValueError(f"Could not retrieve job description from {job_url}")

    # Construct the prompt
    user_prompt = "You are tasked with creating a professional and tailored Cover Letter for a job application.\n"
    user_prompt += "Here is a list of skills and experiences from the candidate's resume:\n"
    user_prompt += f"{resume_skills}\n\n"
    user_prompt += "Here is the job description for the position they are applying for:\n"
    user_prompt += f"{description}\n\n"
    user_prompt += (
        "Using the skills from the candidate's resume, craft a CL that highlights their most relevant qualifications "
        "and experiences for this job making use of job description. "
        "Ensure the CV follows a professional format and aligns with the role requirements. Present the CV in markdown format.\n"
    )
    return user_prompt