middha commited on
Commit
82bed20
·
verified ·
1 Parent(s): 14353bf

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +406 -386
app.py CHANGED
@@ -1,387 +1,407 @@
1
- import logging
2
- from logging.handlers import RotatingFileHandler
3
- from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, Response
4
- from flask_sqlalchemy import SQLAlchemy
5
- import os
6
- from sqlalchemy.exc import SQLAlchemyError
7
- from sqlalchemy import or_
8
- import pandas as pd
9
- import openai, sys # Import OpenAI library
10
- from dotenv import load_dotenv
11
- from flask_migrate import Migrate
12
- from openai import OpenAI
13
- from datetime import datetime, timedelta
14
-
15
- # Load environment variables from .env file
16
- load_dotenv()
17
-
18
- # Set your OpenAI API key from the .env file
19
- openai.api_key = os.getenv("OPENAI_API_KEY")
20
-
21
- # Configure logging with rotating file handler
22
- handler = RotatingFileHandler('app.log', maxBytes=10000000, backupCount=5)
23
- handler.setFormatter(logging.Formatter(
24
- '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
25
- ))
26
- logging.basicConfig(
27
- level=logging.DEBUG,
28
- handlers=[handler]
29
- )
30
-
31
- # Add logger to Flask app
32
- app = Flask("Career Crafter")
33
- app.logger.addHandler(handler)
34
-
35
- app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///jobs.db'
36
- app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
37
- app.secret_key = 'your_secret_key_here'
38
-
39
- db = SQLAlchemy(app)
40
-
41
- # Initialize Flask-Migrate
42
- migrate = Migrate(app, db)
43
-
44
- class Job(db.Model):
45
- id = db.Column(db.Integer, primary_key=True)
46
- company = db.Column(db.String(100), nullable=False)
47
- position = db.Column(db.String(200), nullable=False)
48
- resume_used = db.Column(db.String(200))
49
- date_applied = db.Column(db.String(20))
50
- status = db.Column(db.String(50))
51
- interview_details = db.Column(db.Text)
52
- comments = db.Column(db.Text)
53
- link = db.Column(db.String(300))
54
- job_description = db.Column(db.Text) # New field for job description
55
-
56
- def __repr__(self):
57
- return f"<Job {self.company} - {self.position}>"
58
-
59
- # Add logging to track application flow
60
- logging.info("Starting Flask application")
61
-
62
- def convert_excel_serial_dates():
63
- """Convert any numeric Excel date serials in date_applied to ISO date strings."""
64
- updated = False
65
- for job in Job.query.all():
66
- da = job.date_applied
67
- if da and isinstance(da, (str,)) and da.isdigit():
68
- try:
69
- serial = int(da)
70
- dt = datetime(1899, 12, 30) + timedelta(days=serial)
71
- job.date_applied = dt.strftime('%Y-%m-%d')
72
- updated = True
73
- except ValueError:
74
- continue
75
- if updated:
76
- db.session.commit()
77
- app.logger.info('Converted Excel serial dates to ISO strings in database')
78
-
79
- @app.route('/')
80
- def index():
81
- return redirect('/jobs')
82
-
83
- @app.route('/jobs', methods=['GET', 'POST'])
84
- def jobs():
85
- try:
86
- if request.method == 'POST':
87
- app.logger.info("Received POST request to add a new job", extra={
88
- 'company': request.form['company'],
89
- 'position': request.form['position']
90
- })
91
- new_job = Job(
92
- company=request.form['company'],
93
- position=request.form['position'],
94
- resume_used=request.form.get('resume_used'),
95
- date_applied=request.form.get('date_applied'),
96
- status=request.form.get('status'),
97
- interview_details=request.form.get('interview_details'),
98
- comments=request.form.get('comments'),
99
- link=request.form.get('link')
100
- )
101
- db.session.add(new_job)
102
- db.session.commit()
103
- app.logger.info("Successfully added new job", extra={
104
- 'job_id': new_job.id,
105
- 'company': new_job.company,
106
- 'position': new_job.position
107
- })
108
- return redirect(url_for('jobs'))
109
-
110
- app.logger.debug("Fetching jobs from database")
111
- search_query = request.args.get('search', '').strip()
112
- sort_by = request.args.get('sort', '').strip()
113
- direction = request.args.get('direction', 'asc')
114
- # Read status filter from query args
115
- status_filter = request.args.get('status_filter', '').strip()
116
- # Build base query
117
- jobs_query = Job.query
118
- # Apply search filter
119
- if search_query:
120
- filter_cond = or_(
121
- Job.company.ilike(f"%{search_query}%"),
122
- Job.position.ilike(f"%{search_query}%"),
123
- Job.status.ilike(f"%{search_query}%")
124
- )
125
- jobs_query = jobs_query.filter(filter_cond)
126
- app.logger.info(f"Filtering jobs with search '{search_query}'", extra={'search_query': search_query})
127
- # Apply status filter
128
- valid_statuses = {'Applied', 'Rejected', 'Interviewing'}
129
- if status_filter in valid_statuses:
130
- jobs_query = jobs_query.filter_by(status=status_filter)
131
- app.logger.info(f"Filtering jobs by status '{status_filter}'", extra={'status_filter': status_filter})
132
- # Apply sorting with direction
133
- valid_sorts = {'position', 'date_applied', 'status'}
134
- if sort_by in valid_sorts:
135
- col = getattr(Job, sort_by)
136
- if direction == 'desc':
137
- jobs_query = jobs_query.order_by(col.desc())
138
- else:
139
- jobs_query = jobs_query.order_by(col.asc())
140
- app.logger.info(f"Sorting jobs by '{sort_by}' {direction}", extra={'sort_by': sort_by, 'direction': direction})
141
- # Execute query
142
- jobs_list = jobs_query.all()
143
- app.logger.info(f"Retrieved {len(jobs_list)} jobs", extra={ 'job_count': len(jobs_list), 'search_query': search_query, 'sort_by': sort_by })
144
-
145
- # Compute counts for Applied, Rejected, Interviewing
146
- applied_count = Job.query.filter_by(status='Applied').count()
147
- rejected_count = Job.query.filter_by(status='Rejected').count()
148
- interviewing_count = Job.query.filter_by(status='Interviewing').count()
149
- return render_template('jobs.html', jobs=jobs_list, search_query=search_query,
150
- applied_count=applied_count,
151
- rejected_count=rejected_count,
152
- interviewing_count=interviewing_count,
153
- sort_by=sort_by,
154
- direction=direction,
155
- status_filter=status_filter)
156
- except SQLAlchemyError as e:
157
- app.logger.error("Database error in jobs route", exc_info=True, extra={
158
- 'error': str(e)
159
- })
160
- flash(f"Database error: {e}")
161
- return render_template('jobs.html', jobs=[], search_query='',
162
- applied_count=0, rejected_count=0, interviewing_count=0,
163
- sort_by='', direction='asc', status_filter='')
164
-
165
- @app.route('/edit_job/<int:job_id>', methods=['GET', 'POST'])
166
- def edit_job(job_id):
167
- job = Job.query.get_or_404(job_id)
168
- app.logger.info("Accessing job for edit", extra={
169
- 'job_id': job_id,
170
- 'company': job.company,
171
- 'position': job.position
172
- })
173
-
174
- if request.method == 'POST':
175
- app.logger.info("Updating job details", extra={
176
- 'job_id': job_id,
177
- 'old_status': job.status,
178
- 'new_status': request.form.get('status')
179
- })
180
-
181
- # Update the job details
182
- job.company = request.form['company']
183
- job.position = request.form['position']
184
- job.resume_used = request.form.get('resume_used')
185
- job.date_applied = request.form.get('date_applied')
186
- job.status = request.form.get('status')
187
- job.interview_details = request.form.get('interview_details')
188
- job.comments = request.form.get('comments')
189
- job.link = request.form.get('link')
190
- job.job_description = request.form.get('job_description')
191
-
192
- try:
193
- db.session.commit()
194
- app.logger.info("Successfully updated job", extra={
195
- 'job_id': job_id,
196
- 'company': job.company
197
- })
198
-
199
- if job.job_description:
200
- app.logger.debug("Generating interview plan")
201
- interview_plan = generate_interview_plan(job.job_description)
202
- flash(f"Interview Plan: {interview_plan}")
203
-
204
- return redirect(url_for('jobs'))
205
- except SQLAlchemyError as e:
206
- app.logger.error("Failed to update job", exc_info=True, extra={
207
- 'job_id': job_id,
208
- 'error': str(e)
209
- })
210
- flash(f"Error updating job: {e}")
211
- db.session.rollback()
212
-
213
- return render_template('jobs.html', jobs=Job.query.all(), edit_job=job)
214
-
215
- # Function to generate an interview plan using OpenAI's GPT model
216
- def generate_interview_plan(job_description):
217
- import os, sys
218
- from openai import OpenAI
219
-
220
- # Sanity check
221
- print("Python exe:", sys.executable)
222
- import openai as _oa
223
- print("OpenAI version:", _oa.__version__)
224
- print("OpenAI path:", _oa.__file__)
225
-
226
- # Instantiate the client
227
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
228
-
229
- try:
230
- resp = client.chat.completions.create(
231
- model="gpt-4o-mini",
232
- messages=[
233
- {"role": "system", "content": "You are a helpful assistant."},
234
- {"role": "user", "content": f"Create an interview plan for the following job description. Highlight key skills and requirements:\n{job_description}"},
235
- ],
236
- max_tokens=150
237
- )
238
-
239
- # Extract and return the generated text
240
- return resp.choices[0].message.content.strip()
241
-
242
- except Exception as e:
243
- # This will catch everything, including rate‑limit/quota errors
244
- print(f"Error ({type(e).__name__}): {e}")
245
- # Optionally, if it’s a JSON‑style API error you can introspect:
246
- try:
247
- err = e.error if hasattr(e, "error") else None
248
- print("Error details:", err)
249
- except:
250
- pass
251
- return "Error generating interview plan. Please try again later."
252
-
253
- @app.route('/delete_job/<int:job_id>', methods=['POST'])
254
- def delete_job(job_id):
255
- job = Job.query.get_or_404(job_id)
256
- app.logger.info("Deleting job", extra={
257
- 'job_id': job_id,
258
- 'company': job.company,
259
- 'position': job.position
260
- })
261
-
262
- try:
263
- db.session.delete(job)
264
- db.session.commit()
265
- app.logger.info("Successfully deleted job", extra={
266
- 'job_id': job_id
267
- })
268
- except SQLAlchemyError as e:
269
- app.logger.error("Failed to delete job", exc_info=True, extra={
270
- 'job_id': job_id,
271
- 'error': str(e)
272
- })
273
- db.session.rollback()
274
- flash(f"Error deleting job: {e}")
275
-
276
- return redirect(url_for('jobs'))
277
-
278
- @app.route('/api/chat', methods=['POST'])
279
- def api_chat():
280
- logging.info("[PrepMe] Received chat request")
281
- data = request.get_json() or {}
282
- user_msg = data.get('message', '').strip()
283
-
284
- logging.info(f"[PrepMe] User message: {user_msg}")
285
-
286
- if not user_msg:
287
- logging.warning("[PrepMe] Empty message received")
288
- return jsonify(response="Please type something!")
289
-
290
- try:
291
- # You can preload system/context messages here as needed
292
- messages = [
293
- {"role": "system", "content": "You are a helpful career coach."},
294
- {"role": "user", "content": user_msg}
295
- ]
296
-
297
- logging.info("[PrepMe] Calling OpenAI API")
298
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
299
-
300
- chat = client.chat.completions.create(
301
- model="gpt-4o-mini",
302
- messages=messages,
303
- max_tokens=150
304
- )
305
- reply = chat.choices[0].message.content.strip()
306
- logging.info(f"[PrepMe] OpenAI response: {reply}")
307
-
308
- return jsonify(response=reply)
309
-
310
- except Exception as e:
311
- error_msg = f"Error: {str(e)}"
312
- logging.error(f"[PrepMe] Chat error: {error_msg}")
313
- return jsonify(response=error_msg)
314
-
315
- @app.route('/prepme/<int:job_id>', methods=['GET'])
316
- def prepme(job_id):
317
- job = Job.query.get_or_404(job_id)
318
-
319
- # Load the resume from the uploads directory
320
- resume_path = os.path.join('uploads', 'resume.docx')
321
- resume_content = ""
322
- if os.path.exists(resume_path):
323
- import docx
324
- doc = docx.Document(resume_path)
325
- resume_content = "\n".join([paragraph.text for paragraph in doc.paragraphs])
326
-
327
- # Initial context for the chatbot
328
- initial_context = f"Job Description:\n{job.job_description}\n\nResume:\n{resume_content}"
329
-
330
- # Generate initial LLM response
331
- try:
332
- client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
333
- #logging.info(f"Initial context for OpenAI API: {initial_context}")
334
- response = client.chat.completions.create(
335
- model="gpt-4o-mini",
336
- messages=[
337
- {"role": "system", "content": "You are a career coach."},
338
- {"role": "user", "content": f"Based on the following context, provide an initial response to help the user prepare for this job:\n{initial_context}"},
339
- ],
340
- max_tokens=150
341
- )
342
- initial_response = response.choices[0].message.content.strip()
343
- except Exception as e:
344
- logging.error(f"Error generating response from OpenAI API: {e}")
345
- initial_response = "Error generating response. Please try again later."
346
-
347
- return render_template('prepme.html', job=job, initial_context=initial_context, initial_response=initial_response)
348
-
349
- # Endpoint to download jobs as CSV
350
- @app.route('/download')
351
- def download_jobs():
352
- search_query = request.args.get('search', '').strip()
353
- if search_query:
354
- filter_cond = or_(
355
- Job.company.ilike(f"%{search_query}%"),
356
- Job.position.ilike(f"%{search_query}%"),
357
- Job.status.ilike(f"%{search_query}%")
358
- )
359
- jobs_list = Job.query.filter(filter_cond).all()
360
- else:
361
- jobs_list = Job.query.all()
362
- # Prepare data for CSV
363
- data = [
364
- {'Company': job.company,
365
- 'Position': job.position,
366
- 'Resume Used': job.resume_used,
367
- 'Date Applied': job.date_applied,
368
- 'Status': job.status,
369
- 'Interview Details': job.interview_details,
370
- 'Comments': job.comments,
371
- 'Link': job.link,
372
- 'Job Description': job.job_description}
373
- for job in jobs_list
374
- ]
375
- df = pd.DataFrame(data)
376
- csv_data = df.to_csv(index=False)
377
- return Response(
378
- csv_data,
379
- mimetype='text/csv',
380
- headers={"Content-Disposition": "attachment;filename=jobs.csv"}
381
- )
382
-
383
- if __name__ == "__main__":
384
- # Convert Excel serial dates before first request
385
- with app.app_context():
386
- convert_excel_serial_dates()
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
387
  app.run(host="0.0.0.0", port=5000, debug=True)
 
1
import logging
from logging.handlers import RotatingFileHandler
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify, Response
from flask_sqlalchemy import SQLAlchemy
import os
from sqlalchemy.exc import SQLAlchemyError
from sqlalchemy import or_
import pandas as pd
import openai, sys  # OpenAI SDK: module-level key set below, routes build OpenAI clients
from dotenv import load_dotenv
from flask_migrate import Migrate
from openai import OpenAI
from datetime import datetime, timedelta

# Load environment variables from .env file
load_dotenv()

# Log under /tmp so the app also works on hosts with a read-only application
# filesystem (e.g. containerized Spaces); create the directory before the
# handler opens the file.
os.makedirs("/tmp/logs", exist_ok=True)

# Single rotating file handler shared by the root logger and app.logger.
# NOTE: a second, conflicting RotatingFileHandler('app.log') block that
# re-ran basicConfig and clobbered this configuration has been removed.
handler = RotatingFileHandler(
    "/tmp/logs/app.log",
    maxBytes=10_000_000,
    backupCount=5,
)
handler.setFormatter(logging.Formatter(
    '[%(asctime)s] %(levelname)s in %(module)s: %(message)s'
))
logging.basicConfig(
    level=logging.DEBUG,
    handlers=[handler]
)

# Set your OpenAI API key from the .env file
openai.api_key = os.getenv("OPENAI_API_KEY")

# Create the Flask app and attach the rotating handler so app.logger output
# lands in the same log file as root-logger output.
app = Flask("Career Crafter")
app.logger.addHandler(handler)

app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///jobs.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
# NOTE(review): hard-coded secret key — should be loaded from the environment
# (e.g. os.getenv("SECRET_KEY")) before any real deployment.
app.secret_key = 'your_secret_key_here'

db = SQLAlchemy(app)

# Initialize Flask-Migrate
migrate = Migrate(app, db)
64
class Job(db.Model):
    """One tracked job application (a single row in the jobs table)."""
    id = db.Column(db.Integer, primary_key=True)
    company = db.Column(db.String(100), nullable=False)   # required
    position = db.Column(db.String(200), nullable=False)  # required
    resume_used = db.Column(db.String(200))
    # Stored as a string, not a Date; Excel serials are normalized to
    # 'YYYY-MM-DD' by convert_excel_serial_dates() at startup.
    date_applied = db.Column(db.String(20))
    # Free-form, but the UI filters on 'Applied' / 'Rejected' / 'Interviewing'
    status = db.Column(db.String(50))
    interview_details = db.Column(db.Text)
    comments = db.Column(db.Text)
    link = db.Column(db.String(300))
    job_description = db.Column(db.Text)  # New field for job description

    def __repr__(self):
        return f"<Job {self.company} - {self.position}>"
78
+
79
# Import-time breadcrumb: marks in the log when this module was loaded
logging.info("Starting Flask application")
81
+
82
def convert_excel_serial_dates():
    """Convert any numeric Excel date serials in date_applied to ISO date strings."""
    # Excel's day zero (1899-12-30 accounts for Excel's 1900 leap-year quirk)
    excel_epoch = datetime(1899, 12, 30)
    converted = 0
    for row in Job.query.all():
        raw = row.date_applied
        # Only touch non-empty, purely-numeric string values
        if not (raw and isinstance(raw, str) and raw.isdigit()):
            continue
        try:
            # int() can still raise for exotic Unicode digits that pass isdigit()
            iso = (excel_epoch + timedelta(days=int(raw))).strftime('%Y-%m-%d')
        except ValueError:
            continue
        row.date_applied = iso
        converted += 1
    if converted:
        db.session.commit()
        app.logger.info('Converted Excel serial dates to ISO strings in database')
98
+
99
@app.route('/')
def index():
    # The root URL simply forwards to the jobs dashboard
    return redirect('/jobs')
102
+
103
@app.route('/jobs', methods=['GET', 'POST'])
def jobs():
    """List job applications (GET) or create a new one (POST).

    GET supports query parameters:
        search        — free-text match against company/position/status
        status_filter — exact match on 'Applied' / 'Rejected' / 'Interviewing'
        sort          — one of position/date_applied/status
        direction     — 'asc' (default) or 'desc'
    POST reads the add-job form and redirects back here on success.
    On a database error the page renders empty with a flashed message.
    """
    try:
        if request.method == 'POST':
            app.logger.info("Received POST request to add a new job", extra={
                'company': request.form['company'],
                'position': request.form['position']
            })
            # company/position are required ([] raises 400 via KeyError);
            # the rest are optional and default to None.
            new_job = Job(
                company=request.form['company'],
                position=request.form['position'],
                resume_used=request.form.get('resume_used'),
                date_applied=request.form.get('date_applied'),
                status=request.form.get('status'),
                interview_details=request.form.get('interview_details'),
                comments=request.form.get('comments'),
                link=request.form.get('link')
            )
            db.session.add(new_job)
            db.session.commit()
            app.logger.info("Successfully added new job", extra={
                'job_id': new_job.id,
                'company': new_job.company,
                'position': new_job.position
            })
            # Redirect-after-POST so a browser refresh does not resubmit
            return redirect(url_for('jobs'))

        app.logger.debug("Fetching jobs from database")
        search_query = request.args.get('search', '').strip()
        sort_by = request.args.get('sort', '').strip()
        direction = request.args.get('direction', 'asc')
        # Read status filter from query args
        status_filter = request.args.get('status_filter', '').strip()
        # Build base query
        jobs_query = Job.query
        # Apply search filter (case-insensitive substring match)
        if search_query:
            filter_cond = or_(
                Job.company.ilike(f"%{search_query}%"),
                Job.position.ilike(f"%{search_query}%"),
                Job.status.ilike(f"%{search_query}%")
            )
            jobs_query = jobs_query.filter(filter_cond)
            app.logger.info(f"Filtering jobs with search '{search_query}'", extra={'search_query': search_query})
        # Apply status filter — whitelist guards against arbitrary values
        valid_statuses = {'Applied', 'Rejected', 'Interviewing'}
        if status_filter in valid_statuses:
            jobs_query = jobs_query.filter_by(status=status_filter)
            app.logger.info(f"Filtering jobs by status '{status_filter}'", extra={'status_filter': status_filter})
        # Apply sorting — column name whitelisted before getattr
        valid_sorts = {'position', 'date_applied', 'status'}
        if sort_by in valid_sorts:
            col = getattr(Job, sort_by)
            if direction == 'desc':
                jobs_query = jobs_query.order_by(col.desc())
            else:
                jobs_query = jobs_query.order_by(col.asc())
            app.logger.info(f"Sorting jobs by '{sort_by}' {direction}", extra={'sort_by': sort_by, 'direction': direction})
        # Execute query
        jobs_list = jobs_query.all()
        app.logger.info(f"Retrieved {len(jobs_list)} jobs", extra={'job_count': len(jobs_list), 'search_query': search_query, 'sort_by': sort_by})

        # Dashboard counters are computed over ALL jobs, not the filtered set
        applied_count = Job.query.filter_by(status='Applied').count()
        rejected_count = Job.query.filter_by(status='Rejected').count()
        interviewing_count = Job.query.filter_by(status='Interviewing').count()
        return render_template('jobs.html', jobs=jobs_list, search_query=search_query,
                               applied_count=applied_count,
                               rejected_count=rejected_count,
                               interviewing_count=interviewing_count,
                               sort_by=sort_by,
                               direction=direction,
                               status_filter=status_filter)
    except SQLAlchemyError as e:
        app.logger.error("Database error in jobs route", exc_info=True, extra={
            'error': str(e)
        })
        # FIX: roll back so a failed commit doesn't poison the session for
        # subsequent requests (the original left the session in a failed state).
        db.session.rollback()
        flash(f"Database error: {e}")
        return render_template('jobs.html', jobs=[], search_query='',
                               applied_count=0, rejected_count=0, interviewing_count=0,
                               sort_by='', direction='asc', status_filter='')
184
+
185
@app.route('/edit_job/<int:job_id>', methods=['GET', 'POST'])
def edit_job(job_id):
    """Edit an existing job: GET renders the form, POST persists the changes.

    After a successful save, if a job description is present an LLM-generated
    interview plan is flashed to the user. 404s when job_id does not exist.
    """
    job = Job.query.get_or_404(job_id)
    app.logger.info("Accessing job for edit", extra={
        'job_id': job_id,
        'company': job.company,
        'position': job.position
    })

    if request.method == 'POST':
        app.logger.info("Updating job details", extra={
            'job_id': job_id,
            'old_status': job.status,
            'new_status': request.form.get('status')
        })

        # Update the job details (required fields use [], optional use .get())
        job.company = request.form['company']
        job.position = request.form['position']
        job.resume_used = request.form.get('resume_used')
        job.date_applied = request.form.get('date_applied')
        job.status = request.form.get('status')
        job.interview_details = request.form.get('interview_details')
        job.comments = request.form.get('comments')
        job.link = request.form.get('link')
        job.job_description = request.form.get('job_description')

        try:
            db.session.commit()
            app.logger.info("Successfully updated job", extra={
                'job_id': job_id,
                'company': job.company
            })

            # Side effect on save: flash an LLM interview plan when a
            # description is available (blocks the request on the API call)
            if job.job_description:
                app.logger.debug("Generating interview plan")
                interview_plan = generate_interview_plan(job.job_description)
                flash(f"Interview Plan: {interview_plan}")

            return redirect(url_for('jobs'))
        except SQLAlchemyError as e:
            app.logger.error("Failed to update job", exc_info=True, extra={
                'job_id': job_id,
                'error': str(e)
            })
            flash(f"Error updating job: {e}")
            db.session.rollback()

    # GET (or failed POST): render the jobs page with this job in edit mode
    return render_template('jobs.html', jobs=Job.query.all(), edit_job=job)
234
+
235
# Function to generate an interview plan using OpenAI's GPT model
def generate_interview_plan(job_description):
    """Return a short LLM-generated interview plan for *job_description*.

    Never raises: on any API failure (including rate-limit/quota errors) the
    error is logged and a user-safe fallback string is returned, so callers
    can flash the result directly.
    """
    # Instantiate the client (key comes from the environment / .env)
    client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

    try:
        resp = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a helpful assistant."},
                {"role": "user", "content": f"Create an interview plan for the following job description. Highlight key skills and requirements:\n{job_description}"},
            ],
            max_tokens=150
        )

        # Extract and return the generated text
        return resp.choices[0].message.content.strip()

    except Exception as e:
        # Log through the app logger instead of printing interpreter
        # diagnostics (sys.executable, module paths) to stdout; also include
        # the API error payload when the SDK attaches one.
        app.logger.error(
            "Error generating interview plan (%s): %s; details=%s",
            type(e).__name__, e, getattr(e, "error", None)
        )
        return "Error generating interview plan. Please try again later."
272
+
273
@app.route('/delete_job/<int:job_id>', methods=['POST'])
def delete_job(job_id):
    """Delete one job row; roll back and flash the error on DB failure."""
    target = Job.query.get_or_404(job_id)
    app.logger.info("Deleting job", extra={
        'job_id': job_id,
        'company': target.company,
        'position': target.position
    })

    try:
        db.session.delete(target)
        db.session.commit()
    except SQLAlchemyError as exc:
        app.logger.error("Failed to delete job", exc_info=True, extra={
            'job_id': job_id,
            'error': str(exc)
        })
        db.session.rollback()
        flash(f"Error deleting job: {exc}")
    else:
        # Success path: only reached when the commit went through
        app.logger.info("Successfully deleted job", extra={
            'job_id': job_id
        })

    return redirect(url_for('jobs'))
297
+
298
@app.route('/api/chat', methods=['POST'])
def api_chat():
    """PrepMe chat endpoint: relay one user message to the LLM.

    Expects JSON {"message": str}; always returns JSON {"response": str}.
    On failure the details are logged server-side and a generic message is
    returned — raw exception text is never echoed to the client.
    """
    # Use app.logger for consistency with the rest of the app (the original
    # used the root logger here).
    app.logger.info("[PrepMe] Received chat request")
    data = request.get_json() or {}
    user_msg = data.get('message', '').strip()

    app.logger.info("[PrepMe] User message: %s", user_msg)

    if not user_msg:
        app.logger.warning("[PrepMe] Empty message received")
        return jsonify(response="Please type something!")

    try:
        # You can preload system/context messages here as needed
        messages = [
            {"role": "system", "content": "You are a helpful career coach."},
            {"role": "user", "content": user_msg}
        ]

        app.logger.info("[PrepMe] Calling OpenAI API")
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))

        chat = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=messages,
            max_tokens=150
        )
        reply = chat.choices[0].message.content.strip()
        app.logger.info("[PrepMe] OpenAI response: %s", reply)

        return jsonify(response=reply)

    except Exception as e:
        # FIX: log the full error but do not leak internals (API keys in
        # URLs, stack details) back to the browser.
        app.logger.error("[PrepMe] Chat error: %s", e, exc_info=True)
        return jsonify(response="Sorry, something went wrong generating a reply. Please try again.")
334
+
335
@app.route('/prepme/<int:job_id>', methods=['GET'])
def prepme(job_id):
    """Render the PrepMe coaching page seeded with the job description and resume.

    NOTE(review): assumes one shared resume at uploads/resume.docx — confirm
    there is no per-user resume handling intended here.
    """
    job = Job.query.get_or_404(job_id)

    # Load the resume from the uploads directory (silently skipped if missing)
    resume_path = os.path.join('uploads', 'resume.docx')
    resume_content = ""
    if os.path.exists(resume_path):
        import docx  # deferred: python-docx only needed when a resume exists
        doc = docx.Document(resume_path)
        resume_content = "\n".join([paragraph.text for paragraph in doc.paragraphs])

    # Initial context for the chatbot (also passed to the template)
    initial_context = f"Job Description:\n{job.job_description}\n\nResume:\n{resume_content}"

    # Generate initial LLM response; fall back to a user-safe message on failure
    try:
        client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
        #logging.info(f"Initial context for OpenAI API: {initial_context}")
        response = client.chat.completions.create(
            model="gpt-4o-mini",
            messages=[
                {"role": "system", "content": "You are a career coach."},
                {"role": "user", "content": f"Based on the following context, provide an initial response to help the user prepare for this job:\n{initial_context}"},
            ],
            max_tokens=150
        )
        initial_response = response.choices[0].message.content.strip()
    except Exception as e:
        logging.error(f"Error generating response from OpenAI API: {e}")
        initial_response = "Error generating response. Please try again later."

    return render_template('prepme.html', job=job, initial_context=initial_context, initial_response=initial_response)
368
+
369
# Endpoint to download jobs as CSV
@app.route('/download')
def download_jobs():
    """Export jobs as a CSV attachment, honoring the same free-text search."""
    search_query = request.args.get('search', '').strip()

    query = Job.query
    if search_query:
        query = query.filter(or_(
            Job.company.ilike(f"%{search_query}%"),
            Job.position.ilike(f"%{search_query}%"),
            Job.status.ilike(f"%{search_query}%")
        ))
    matched = query.all()

    # Build one record per job; dict key order defines the CSV column order
    records = []
    for job in matched:
        records.append({
            'Company': job.company,
            'Position': job.position,
            'Resume Used': job.resume_used,
            'Date Applied': job.date_applied,
            'Status': job.status,
            'Interview Details': job.interview_details,
            'Comments': job.comments,
            'Link': job.link,
            'Job Description': job.job_description,
        })

    csv_data = pd.DataFrame(records).to_csv(index=False)
    return Response(
        csv_data,
        mimetype='text/csv',
        headers={"Content-Disposition": "attachment;filename=jobs.csv"}
    )
402
+
403
if __name__ == "__main__":
    # One-time data migration: normalize Excel serial dates before serving
    with app.app_context():
        convert_excel_serial_dates()
    # NOTE(review): debug=True enables the Werkzeug debugger/reloader —
    # must be disabled for any non-local deployment.
    app.run(host="0.0.0.0", port=5000, debug=True)