middha commited on
Commit
2fe069a
·
verified ·
1 Parent(s): 6d9e297

Upload 12 files

Browse files
Files changed (13) hide show
  1. .gitattributes +1 -0
  2. .gitignore +1 -0
  3. CareerCrafter1.0.code-workspace +8 -0
  4. Dockerfile +20 -0
  5. Procfile.txt +1 -0
  6. README.md +3 -10
  7. app.log +0 -0
  8. app.py +387 -0
  9. bfg-1.15.0.jar +3 -0
  10. myfirstcode.py +32 -0
  11. prepme_gui.py +0 -0
  12. requirements.txt +9 -0
  13. test.py +32 -0
.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
 
 
33
  *.zip filter=lfs diff=lfs merge=lfs -text
34
  *.zst filter=lfs diff=lfs merge=lfs -text
35
  *tfevents* filter=lfs diff=lfs merge=lfs -text
36
+ bfg-1.15.0.jar filter=lfs diff=lfs merge=lfs -text
.gitignore ADDED
@@ -0,0 +1 @@
 
 
1
+ .env
+ CareerCrafter/.env
CareerCrafter1.0.code-workspace ADDED
@@ -0,0 +1,8 @@
 
 
 
 
 
 
 
 
 
1
+ {
2
+ "folders": [
3
+ {
4
+ "path": "."
5
+ }
6
+ ],
7
+ "settings": {}
8
+ }
Dockerfile ADDED
@@ -0,0 +1,20 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
# Build on the slim official Python image to keep the container small.
FROM python:3.12-slim

# All subsequent commands run relative to /app.
WORKDIR /app

# Install dependencies first so Docker layer caching survives code edits.
COPY requirements.txt ./
RUN pip install --no-cache-dir -r requirements.txt

# Bring in the application source.
COPY . .

# Flask serves on port 5000 (see app.py).
EXPOSE 5000

# Launch the application.
CMD ["python", "app.py"]
Procfile.txt ADDED
@@ -0,0 +1 @@
 
 
1
+ web: gunicorn app:app --bind 0.0.0.0:$PORT
README.md CHANGED
@@ -1,10 +1,3 @@
1
- ---
2
- title: Cc
3
- emoji: 🦀
4
- colorFrom: pink
5
- colorTo: purple
6
- sdk: docker
7
- pinned: false
8
- ---
9
-
10
- Check out the configuration reference at https://huggingface.co/docs/hub/spaces-config-reference
 
1
+ # CareerCraft
2
+
3
+ CareerCraft is a comprehensive job management platform designed to streamline the job application process and enhance career growth. With features like resume uploads, job tracking, and preparation tools, CareerCraft empowers users to craft their career paths effectively.
 
 
 
 
 
 
 
app.log ADDED
The diff for this file is too large to render. See raw diff
 
app.py ADDED
@@ -0,0 +1,387 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, Response
from flask_sqlalchemy import SQLAlchemy
import os
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import or_
import pandas as pd
import openai, sys  # OpenAI SDK; sys retained for diagnostics elsewhere
from dotenv import load_dotenv
from flask_migrate import Migrate
from openai import OpenAI
from datetime import datetime, timedelta

# Load environment variables (OPENAI_API_KEY, SECRET_KEY, ...) from .env.
load_dotenv()

# Module-level OpenAI key; request handlers also build explicit OpenAI(...)
# clients with the same env var.
openai.api_key = os.getenv("OPENAI_API_KEY")

# Rotating log file: 10 MB per file, 5 backups, so app.log cannot grow unbounded.
handler = RotatingFileHandler('app.log', maxBytes=10000000, backupCount=5)
handler.setFormatter(logging.Formatter(
    '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
))
logging.basicConfig(
    level=logging.DEBUG,
    handlers=[handler]
)

# Add the rotating handler to Flask's own logger as well.
app = Flask("Career Crafter")
app.logger.addHandler(handler)

app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///jobs.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# Read the session secret from the environment in production; the literal
# fallback keeps local development working but must not ship as-is.
app.secret_key = os.getenv('SECRET_KEY', 'your_secret_key_here')

db = SQLAlchemy(app)

# Enable `flask db` migration commands (Flask-Migrate / Alembic).
migrate = Migrate(app, db)
class Job(db.Model):
    """One tracked job application and its lifecycle metadata."""

    id = db.Column(db.Integer, primary_key=True)
    company = db.Column(db.String(100), nullable=False)
    position = db.Column(db.String(200), nullable=False)
    resume_used = db.Column(db.String(200))
    date_applied = db.Column(db.String(20))  # stored as text, normalized to ISO 'YYYY-MM-DD'
    status = db.Column(db.String(50))  # e.g. Applied / Rejected / Interviewing
    interview_details = db.Column(db.Text)
    comments = db.Column(db.Text)
    link = db.Column(db.String(300))
    job_description = db.Column(db.Text)  # free-form posting text, consumed by PrepMe

    def __repr__(self):
        return f"<Job {self.company} - {self.position}>"
# Record application start-up in the rotating log file.
logging.info("Starting Flask application")
def convert_excel_serial_dates():
    """Convert any numeric Excel date serials in date_applied to ISO date strings."""
    changed = False
    for record in Job.query.all():
        value = record.date_applied
        # Only all-digit strings are treated as Excel serials; real ISO
        # dates contain '-' and are left untouched.
        if not (value and isinstance(value, str) and value.isdigit()):
            continue
        try:
            # Excel's day-0 epoch is 1899-12-30 (accounts for the 1900 leap bug).
            record.date_applied = (
                datetime(1899, 12, 30) + timedelta(days=int(value))
            ).strftime('%Y-%m-%d')
        except ValueError:
            continue
        changed = True
    if changed:
        db.session.commit()
        app.logger.info('Converted Excel serial dates to ISO strings in database')
@app.route('/')
def index():
    """Redirect the site root to the job listing.

    Uses url_for (rather than a hard-coded path) for consistency with the
    other routes, so the redirect survives URL-rule changes.
    """
    return redirect(url_for('jobs'))
@app.route('/jobs', methods=['GET', 'POST'])
def jobs():
    """List tracked jobs, or create one from the submitted form.

    POST: builds a Job from the form fields, commits, then redirects back.
    GET query args: ``search`` (substring match over company/position/
    status), ``status_filter`` (Applied/Rejected/Interviewing), ``sort``
    (position/date_applied/status) and ``direction`` (asc/desc).
    On a database error, flashes the message and renders an empty listing.
    """
    try:
        if request.method == 'POST':
            app.logger.info("Received POST request to add a new job", extra={
                'company': request.form['company'],
                'position': request.form['position']
            })
            # NOTE: job_description is not collected here; it is only set
            # later via edit_job.
            new_job = Job(
                company=request.form['company'],
                position=request.form['position'],
                resume_used=request.form.get('resume_used'),
                date_applied=request.form.get('date_applied'),
                status=request.form.get('status'),
                interview_details=request.form.get('interview_details'),
                comments=request.form.get('comments'),
                link=request.form.get('link')
            )
            db.session.add(new_job)
            db.session.commit()
            app.logger.info("Successfully added new job", extra={
                'job_id': new_job.id,
                'company': new_job.company,
                'position': new_job.position
            })
            return redirect(url_for('jobs'))

        app.logger.debug("Fetching jobs from database")
        search_query = request.args.get('search', '').strip()
        sort_by = request.args.get('sort', '').strip()
        direction = request.args.get('direction', 'asc')
        # Read status filter from query args
        status_filter = request.args.get('status_filter', '').strip()
        # Build base query
        jobs_query = Job.query
        # Apply search filter (case-insensitive substring on three columns)
        if search_query:
            filter_cond = or_(
                Job.company.ilike(f"%{search_query}%"),
                Job.position.ilike(f"%{search_query}%"),
                Job.status.ilike(f"%{search_query}%")
            )
            jobs_query = jobs_query.filter(filter_cond)
            app.logger.info(f"Filtering jobs with search '{search_query}'", extra={'search_query': search_query})
        # Apply status filter; the whitelist rejects arbitrary values
        valid_statuses = {'Applied', 'Rejected', 'Interviewing'}
        if status_filter in valid_statuses:
            jobs_query = jobs_query.filter_by(status=status_filter)
            app.logger.info(f"Filtering jobs by status '{status_filter}'", extra={'status_filter': status_filter})
        # Apply sorting with direction; only whitelisted columns are sortable
        valid_sorts = {'position', 'date_applied', 'status'}
        if sort_by in valid_sorts:
            col = getattr(Job, sort_by)
            if direction == 'desc':
                jobs_query = jobs_query.order_by(col.desc())
            else:
                jobs_query = jobs_query.order_by(col.asc())
            app.logger.info(f"Sorting jobs by '{sort_by}' {direction}", extra={'sort_by': sort_by, 'direction': direction})
        # Execute query
        jobs_list = jobs_query.all()
        app.logger.info(f"Retrieved {len(jobs_list)} jobs", extra={ 'job_count': len(jobs_list), 'search_query': search_query, 'sort_by': sort_by })

        # Compute counts for Applied, Rejected, Interviewing — these are
        # unfiltered totals for the header, independent of the search above.
        applied_count = Job.query.filter_by(status='Applied').count()
        rejected_count = Job.query.filter_by(status='Rejected').count()
        interviewing_count = Job.query.filter_by(status='Interviewing').count()
        return render_template('jobs.html', jobs=jobs_list, search_query=search_query,
                               applied_count=applied_count,
                               rejected_count=rejected_count,
                               interviewing_count=interviewing_count,
                               sort_by=sort_by,
                               direction=direction,
                               status_filter=status_filter)
    except SQLAlchemyError as e:
        app.logger.error("Database error in jobs route", exc_info=True, extra={
            'error': str(e)
        })
        # Degrade gracefully: show the error and an empty listing
        flash(f"Database error: {e}")
        return render_template('jobs.html', jobs=[], search_query='',
                               applied_count=0, rejected_count=0, interviewing_count=0,
                               sort_by='', direction='asc', status_filter='')
@app.route('/edit_job/<int:job_id>', methods=['GET', 'POST'])
def edit_job(job_id):
    """Edit an existing job (404 if *job_id* is unknown).

    On POST, overwrites every field from the submitted form and commits.
    If the job now has a description, an AI interview plan is generated
    and flashed before redirecting back to the list. On commit failure the
    session is rolled back and the edit view is re-rendered.
    """
    job = Job.query.get_or_404(job_id)
    app.logger.info("Accessing job for edit", extra={
        'job_id': job_id,
        'company': job.company,
        'position': job.position
    })

    if request.method == 'POST':
        app.logger.info("Updating job details", extra={
            'job_id': job_id,
            'old_status': job.status,
            'new_status': request.form.get('status')
        })

        # Update the job details from the submitted form
        job.company = request.form['company']
        job.position = request.form['position']
        job.resume_used = request.form.get('resume_used')
        job.date_applied = request.form.get('date_applied')
        job.status = request.form.get('status')
        job.interview_details = request.form.get('interview_details')
        job.comments = request.form.get('comments')
        job.link = request.form.get('link')
        job.job_description = request.form.get('job_description')

        try:
            db.session.commit()
            app.logger.info("Successfully updated job", extra={
                'job_id': job_id,
                'company': job.company
            })

            # Surface an AI-generated plan only when a description exists
            if job.job_description:
                app.logger.debug("Generating interview plan")
                interview_plan = generate_interview_plan(job.job_description)
                flash(f"Interview Plan: {interview_plan}")

            return redirect(url_for('jobs'))
        except SQLAlchemyError as e:
            app.logger.error("Failed to update job", exc_info=True, extra={
                'job_id': job_id,
                'error': str(e)
            })
            flash(f"Error updating job: {e}")
            db.session.rollback()

    # GET request (or failed POST): render the list with this job in edit mode
    return render_template('jobs.html', jobs=Job.query.all(), edit_job=job)
# Function to generate an interview plan using OpenAI's GPT model
def generate_interview_plan(job_description):
    """Ask the OpenAI chat API for a short interview-preparation plan.

    Args:
        job_description: free-form posting text to base the plan on.

    Returns:
        The generated plan text, or a fixed user-facing error string if the
        API call fails for any reason (network, auth, rate limit, quota).
    """
    # Fresh client per call; the key comes from the environment (.env).
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    try:
        resp = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": f"Create an interview plan for the following job description. Highlight key skills and requirements:\n{job_description}"},
            ],
            max_tokens=150
        )
        # Extract and return the generated text
        return resp.choices[0].message.content.strip()
    except Exception as e:
        # Catch everything (including rate-limit/quota errors) so a flaky
        # API never breaks the edit flow; log instead of printing so the
        # failure lands in app.log with a traceback.
        app.logger.error("OpenAI interview-plan request failed (%s): %s",
                         type(e).__name__, e, exc_info=True)
        return "Error generating interview plan. Please try again later."
@app.route('/delete_job/<int:job_id>', methods=['POST'])
def delete_job(job_id):
    """Delete one job (404 if unknown), then return to the job list."""
    job = Job.query.get_or_404(job_id)
    app.logger.info("Deleting job", extra={
        'job_id': job_id,
        'company': job.company,
        'position': job.position
    })

    try:
        db.session.delete(job)
        db.session.commit()
    except SQLAlchemyError as e:
        # Roll back the failed delete and surface the error to the user.
        app.logger.error("Failed to delete job", exc_info=True, extra={
            'job_id': job_id,
            'error': str(e)
        })
        db.session.rollback()
        flash(f"Error deleting job: {e}")
    else:
        app.logger.info("Successfully deleted job", extra={'job_id': job_id})

    return redirect(url_for('jobs'))
@app.route('/api/chat', methods=['POST'])
def api_chat():
    """JSON chat endpoint for the PrepMe assistant.

    Expects ``{"message": "..."}`` and returns ``{"response": "..."}``.
    Logging goes through app.logger (consistent with the other routes)
    so entries carry the Flask module context.
    """
    app.logger.info("[PrepMe] Received chat request")
    data = request.get_json() or {}
    user_msg = data.get('message', '').strip()

    app.logger.info(f"[PrepMe] User message: {user_msg}")

    if not user_msg:
        app.logger.warning("[PrepMe] Empty message received")
        return jsonify(response="Please type something!")

    try:
        # You can preload system/context messages here as needed
        messages = [
            {"role": "system", "content": "You are a helpful career coach."},
            {"role": "user", "content": user_msg}
        ]

        app.logger.info("[PrepMe] Calling OpenAI API")
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

        chat = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=150
        )
        reply = chat.choices[0].message.content.strip()
        app.logger.info(f"[PrepMe] OpenAI response: {reply}")

        return jsonify(response=reply)

    except Exception as e:
        # NOTE(review): echoing str(e) back to the client can leak internal
        # details; consider a generic message in production.
        error_msg = f"Error: {str(e)}"
        app.logger.error(f"[PrepMe] Chat error: {error_msg}", exc_info=True)
        return jsonify(response=error_msg)
@app.route('/prepme/<int:job_id>', methods=['GET'])
def prepme(job_id):
    """Render the PrepMe coaching page seeded with job + resume context."""
    job = Job.query.get_or_404(job_id)

    # Pull the stored resume text from the uploads directory, if present.
    resume_content = ""
    resume_path = os.path.join('uploads', 'resume.docx')
    if os.path.exists(resume_path):
        import docx  # imported lazily: only needed when a resume exists
        document = docx.Document(resume_path)
        resume_content = "\n".join(paragraph.text for paragraph in document.paragraphs)

    # Initial context handed to the chatbot (job posting + resume text).
    initial_context = f"Job Description:\n{job.job_description}\n\nResume:\n{resume_content}"

    # Generate the opening LLM response; fall back to an error string so the
    # page still renders when the API is unavailable.
    try:
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a career coach."},
                {"role": "user", "content": f"Based on the following context, provide an initial response to help the user prepare for this job:\n{initial_context}"},
            ],
            max_tokens=150
        )
        initial_response = response.choices[0].message.content.strip()
    except Exception as e:
        logging.error(f"Error generating response from OpenAI API: {e}")
        initial_response = "Error generating response. Please try again later."

    return render_template('prepme.html', job=job, initial_context=initial_context, initial_response=initial_response)
# Endpoint to download jobs as CSV
@app.route('/download')
def download_jobs():
    """Export the (optionally search-filtered) job list as a CSV attachment."""
    search_query = request.args.get('search', '').strip()
    query = Job.query
    if search_query:
        # Same case-insensitive search used by the /jobs listing.
        query = query.filter(or_(
            Job.company.ilike(f"%{search_query}%"),
            Job.position.ilike(f"%{search_query}%"),
            Job.status.ilike(f"%{search_query}%")
        ))
    jobs_list = query.all()

    # One row per job with human-readable column headers.
    rows = [{
        'Company': job.company,
        'Position': job.position,
        'Resume Used': job.resume_used,
        'Date Applied': job.date_applied,
        'Status': job.status,
        'Interview Details': job.interview_details,
        'Comments': job.comments,
        'Link': job.link,
        'Job Description': job.job_description,
    } for job in jobs_list]

    csv_data = pd.DataFrame(rows).to_csv(index=False)
    return Response(
        csv_data,
        mimetype='text/csv',
        headers={"Content-Disposition": "attachment;filename=jobs.csv"}
    )
if __name__ == "__main__":
    # Normalize any legacy Excel serial dates before serving requests.
    with app.app_context():
        convert_excel_serial_dates()
    # The Dockerfile runs `python app.py` directly, so debug mode (reloader +
    # interactive traceback) must be opt-in, never hard-coded on: enable it
    # locally with FLASK_DEBUG=1.
    app.run(host="0.0.0.0", port=5000,
            debug=os.getenv("FLASK_DEBUG", "").lower() in ("1", "true"))
bfg-1.15.0.jar ADDED
@@ -0,0 +1,3 @@
 
 
 
 
1
+ version https://git-lfs.github.com/spec/v1
2
+ oid sha256:dfe2885adc2916379093f02a80181200536856c9a987bf21c492e452adefef7a
3
+ size 14721936
myfirstcode.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
from flask import Flask, request, jsonify

# Minimal demo service exposing a single JSON addition endpoint.
app = Flask(__name__)
# Define the addition function
def add_numbers(a, b):
    """Return the sum of *a* and *b*."""
    total = a + b
    return total
# Create an API route
@app.route('/add', methods=['POST'])
def add():
    """Add two numbers posted as JSON: {"a": ..., "b": ...} -> {"sum": ...}.

    Returns 400 if either operand is missing or non-numeric.
    """
    # silent=True keeps malformed / non-JSON bodies from raising; an empty
    # dict then falls through to the "missing operands" error below.
    data = request.get_json(silent=True) or {}
    a = data.get('a')
    b = data.get('b')

    # Check that both a and b are provided
    if a is None or b is None:
        return jsonify({"error": "Please provide both 'a' and 'b' numbers."}), 400

    # Accept numeric strings as well as numbers; TypeError covers
    # non-castable JSON values like lists or objects.
    try:
        a = float(a)
        b = float(b)
    except (TypeError, ValueError):
        return jsonify({"error": "Inputs must be numbers."}), 400

    result = add_numbers(a, b)

    return jsonify({"sum": result})
# Start the development server when executed directly.
if __name__ == '__main__':
    app.run(debug=True)
prepme_gui.py ADDED
File without changes
requirements.txt ADDED
@@ -0,0 +1,9 @@
 
 
 
 
 
 
 
 
 
 
1
+ Flask>=2.2.0
2
+ Flask-SQLAlchemy>=3.0.0
3
+ Flask-Migrate>=4.0.0
4
+ SQLAlchemy>=1.4.0
5
+ python-dotenv>=0.21.0
6
+ openai>=0.27.0
7
+ pandas>=1.3.0
8
+ python-docx>=0.8.10
9
+ requests>=2.28.0
10
+ gunicorn>=20.1.0
test.py ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import os, sys
from openai import OpenAI

# Sanity check: confirm which interpreter and which openai install are in use.
print("Python exe:", sys.executable)
import openai as _oa
print("OpenAI version:", _oa.__version__)
print("OpenAI path:", _oa.__file__)

# Instantiate the client (key comes from the environment)
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

try:
    resp = client.chat.completions.create(
        model="gpt-4o-mini",
        messages=[
            {"role": "system", "content": "You are a career coach."},
            {"role": "user", "content": "Hi there! How can I improve my resume?"}
        ],
        max_tokens=50
    )
    print("Assistant:", resp.choices[0].message.content.strip())

except Exception as e:
    # Narrowed from a bare `except:` so Ctrl-C / SystemExit still work.
    # This still catches everything API-related, including rate-limit/quota.
    print(f"Error ({type(e).__name__}): {e}")
    # If it's a JSON-style API error, surface any structured details:
    try:
        err = e.error if hasattr(e, "error") else None
        print("Error details:", err)
    except Exception:
        # Defensive: diagnostics must never crash the sanity check itself.
        pass