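"""CareerCraft: a small Flask app for tracking job applications.

Applications are stored in a SQLite database, jobs can be imported from
CSV/XLSX files, and the OpenAI API is used to generate interview-prep content.
"""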
import logging
from flask import Flask, render_template, request, redirect, url_for, flash, jsonify
from flask_sqlalchemy import SQLAlchemy
import os
import pandas as pd
from werkzeug.utils import secure_filename
import openpyxl
from sqlalchemy.exc import SQLAlchemyError
import openai, sys  # OpenAI SDK; sys is used for debug logging
from dotenv import load_dotenv
from flask_migrate import Migrate
from openai import OpenAI
# Load environment variables from .env file
load_dotenv()
# Set your OpenAI API key from the .env file
openai.api_key = os.getenv("OPENAI_API_KEY")
# Configure logging
#logging.basicConfig(level=logging.DEBUG, format='%(asctime)s - %(levelname)s - %(message)s', filename='app.log', filemode='a')
logging.basicConfig(level=logging.DEBUG, format="%(asctime)s - %(levelname)s - %(message)s")
app = Flask(__name__)
app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///jobs.db'
app.config['SQLALCHEMY_TRACK_MODIFICATIONS'] = False
app.config['UPLOAD_FOLDER'] = 'uploads'
app.secret_key = os.getenv("SECRET_KEY", "your_secret_key_here")  # prefer an env var over a hardcoded value
# Ensure the upload folder exists
os.makedirs(app.config['UPLOAD_FOLDER'], exist_ok=True)
db = SQLAlchemy(app)
# Initialize Flask-Migrate
migrate = Migrate(app, db)
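# With Flask-Migrate set up, schema changes are normally applied via the CLI,
# e.g. `flask db migrate` followed by `flask db upgrade` (not invoked from this file).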
class Job(db.Model):
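    """A single job application, with status, notes, and the job description used for interview prep."""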
id = db.Column(db.Integer, primary_key=True)
company = db.Column(db.String(100), nullable=False)
position = db.Column(db.String(200), nullable=False)
resume_used = db.Column(db.String(200))
date_applied = db.Column(db.String(20))
status = db.Column(db.String(50))
interview_details = db.Column(db.Text)
comments = db.Column(db.Text)
link = db.Column(db.String(300))
job_description = db.Column(db.Text) # New field for job description
def __repr__(self):
return f"<Job {self.company} - {self.position}>"
# Add logging to track application flow
logging.info("Starting Flask application")
@app.route('/')
def index():
return redirect('/jobs')
@app.route('/jobs', methods=['GET', 'POST'])
def jobs():
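    """List all tracked jobs; on POST, create a new job from the submitted form data."""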
try:
if request.method == 'POST':
logging.info("Received POST request to add a new job")
new_job = Job(
company=request.form['company'],
position=request.form['position'],
resume_used=request.form.get('resume_used'),
date_applied=request.form.get('date_applied'),
status=request.form.get('status'),
interview_details=request.form.get('interview_details'),
comments=request.form.get('comments'),
link=request.form.get('link')
)
db.session.add(new_job)
db.session.commit()
logging.info(f"Added new job: {new_job}")
return redirect(url_for('jobs'))
logging.info("Fetching all jobs from the database")
all_jobs = Job.query.all()
logging.debug(f"Retrieved jobs: {all_jobs}")
return render_template('jobs.html', jobs=all_jobs)
except SQLAlchemyError as e:
logging.error(f"Database error: {e}")
flash(f"Database error: {e}")
return render_template('jobs.html', jobs=[])
@app.route('/edit_job/<int:job_id>', methods=['GET', 'POST'])
def edit_job(job_id):
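    """Edit an existing job; on POST, save the changes and, if a job description is present, flash a generated interview plan."""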
job = Job.query.get_or_404(job_id)
if request.method == 'POST':
# Update the job details
job.company = request.form['company']
job.position = request.form['position']
job.resume_used = request.form.get('resume_used')
job.date_applied = request.form.get('date_applied')
job.status = request.form.get('status')
job.interview_details = request.form.get('interview_details')
job.comments = request.form.get('comments')
job.link = request.form.get('link')
job.job_description = request.form.get('job_description') # Update job description
db.session.commit()
# Call LLM to generate interview plan
if job.job_description:
interview_plan = generate_interview_plan(job.job_description)
flash(f"Interview Plan: {interview_plan}")
return redirect(url_for('jobs'))
return render_template('jobs.html', jobs=Job.query.all(), edit_job=job)
# Function to generate an interview plan using OpenAI's GPT model
def generate_interview_plan(job_description):
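    """Generate a short interview plan for a job description via the OpenAI chat API."""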
    # Environment sanity check (os, sys, and the OpenAI SDK are imported at module level)
    logging.debug("Python exe: %s", sys.executable)
    logging.debug("OpenAI version: %s (loaded from %s)", openai.__version__, openai.__file__)
# Instantiate the client
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
try:
resp = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": "You are a helpful assistant."},
{"role": "user", "content": f"Create an interview plan for the following job description. Highlight key skills and requirements:\n{job_description}"},
],
max_tokens=150
)
# Extract and return the generated text
return resp.choices[0].message.content.strip()
    except Exception as e:
        # This catches everything, including rate-limit and quota errors from the API
        logging.error("Error generating interview plan (%s): %s", type(e).__name__, e)
        # Structured API errors sometimes carry extra details worth logging
        if hasattr(e, "error"):
            logging.error("Error details: %s", e.error)
        return "Error generating interview plan. Please try again later."
@app.route('/delete_job/<int:job_id>', methods=['POST'])
def delete_job(job_id):
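    """Delete the given job and return to the jobs list."""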
job = Job.query.get_or_404(job_id)
db.session.delete(job)
db.session.commit()
return redirect(url_for('jobs'))
@app.route('/upload', methods=['POST'])
def upload():
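    """Import jobs from an uploaded .csv or .xlsx file after validating its column headers."""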
logging.info("Received file upload request")
if 'file' not in request.files:
logging.warning("No file part in the request")
flash('No file part in the request')
return redirect(url_for('jobs'))
file = request.files['file']
if file.filename == '':
logging.warning("No file selected for upload")
flash('No selected file')
return redirect(url_for('jobs'))
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
filepath = os.path.join(app.config['UPLOAD_FOLDER'], filename)
file.save(filepath)
logging.info(f"File saved to {filepath}")
try:
if filename.endswith('.csv'):
data = pd.read_csv(filepath)
else:
data = pd.read_excel(filepath)
logging.info("Validating column headers")
required_columns = {'company', 'position', 'resume used', 'date applied', 'status', 'interview details', 'comments', 'link'}
file_columns = set(data.columns.str.strip().str.lower())
if not required_columns.issubset(file_columns):
missing_columns = required_columns - file_columns
logging.warning(f"Missing required columns: {missing_columns}")
flash(f"Missing required columns: {missing_columns}")
return redirect(url_for('jobs'))
logging.info("Normalizing column names and processing rows")
data.columns = data.columns.str.strip().str.lower()
# Handle column renaming and ignore unnecessary columns
column_mapping = {
                'company applied': 'company',
'position applied': 'position',
'resume used': 'resume used',
'date applied': 'date applied',
'status': 'status',
'interview details': 'interview details',
'comments': 'comments',
'link': 'link'
}
data.rename(columns=column_mapping, inplace=True)
# Drop unnecessary columns
data = data[[col for col in column_mapping.values() if col in data.columns]]
for _, row in data.iterrows():
new_job = Job(
company=row.get('company', ''),
position=row.get('position', ''),
resume_used=row.get('resume used', ''),
date_applied=row.get('date applied', ''),
status=row.get('status', ''),
interview_details=row.get('interview details', ''),
comments=row.get('comments', ''),
link=row.get('link', '')
)
db.session.add(new_job)
logging.info(f"Added job from file: {new_job}")
db.session.commit()
logging.info("File processed and data committed to the database")
flash('File uploaded and data imported successfully!')
return redirect(url_for('jobs'))
        except Exception as e:
            logging.error(f"Error processing file: {e}")
            flash(f"Error processing file: {e}")
            return redirect(url_for('jobs'))
    # Reached when the uploaded file has a disallowed extension
    logging.warning(f"Rejected upload with unsupported file type: {file.filename}")
    flash('Unsupported file type. Please upload a .csv or .xlsx file.')
    return redirect(url_for('jobs'))
@app.route('/validate_columns', methods=['GET'])
def validate_columns():
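    """Check the headers of uploads/JobApplications.xlsx against the expected job columns."""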
filepath = os.path.join(app.config['UPLOAD_FOLDER'], 'JobApplications.xlsx')
if not os.path.exists(filepath):
return "Excel file not found.", 404
# Read column names from the Excel sheet
workbook = openpyxl.load_workbook(filepath)
sheet = workbook.active
    excel_columns = [str(cell.value).strip().lower() for cell in sheet[1] if cell.value]
# Define expected column names from the database
expected_columns = {'company', 'position', 'resume used', 'date applied', 'status', 'interview details', 'comments', 'link'}
# Compare columns
missing_columns = expected_columns - set(excel_columns)
extra_columns = set(excel_columns) - expected_columns
if missing_columns or extra_columns:
return f"Missing columns: {missing_columns}, Extra columns: {extra_columns}", 400
return "Column names are aligned.", 200
@app.route('/api/chat', methods=['POST'])
def api_chat():
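    """JSON chat endpoint: send the user's message to the OpenAI chat API and return the reply."""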
data = request.get_json() or {}
user_msg = data.get('message', '').strip()
if not user_msg:
return jsonify(response="Please type something!") # guard empty
# You can preload system/context messages here as needed
messages = [
{"role": "system", "content": "You are a helpful career coach."},
{"role": "user", "content": user_msg}
]
try:
        # Create the OpenAI client the same way prepme() does
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
chat = client.chat.completions.create(
model="gpt-4o-mini",
messages=messages,
max_tokens=150
)
reply = chat.choices[0].message.content.strip()
except Exception as e:
reply = f"Error: {str(e)}"
return jsonify(response=reply)
@app.route('/prepme/<int:job_id>', methods=['GET'])
def prepme(job_id):
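    """Build interview-prep context from the job description and uploads/resume.docx, then show an initial coaching response."""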
job = Job.query.get_or_404(job_id)
# Load the resume from the uploads directory
resume_path = os.path.join(app.config['UPLOAD_FOLDER'], 'resume.docx')
resume_content = ""
if os.path.exists(resume_path):
import docx
doc = docx.Document(resume_path)
resume_content = "\n".join([paragraph.text for paragraph in doc.paragraphs])
# Initial context for the chatbot
initial_context = f"Job Description:\n{job.job_description}\n\nResume:\n{resume_content}"
# Generate initial LLM response
try:
client = OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
#logging.info(f"Initial context for OpenAI API: {initial_context}")
response = client.chat.completions.create(
model="gpt-4o-mini",
messages=[
{"role": "system", "content": "You are a career coach."},
{"role": "user", "content": f"Based on the following context, provide an initial response to help the user prepare for this job:\n{initial_context}"},
],
max_tokens=150
)
initial_response = response.choices[0].message.content.strip()
except Exception as e:
logging.error(f"Error generating response from OpenAI API: {e}")
initial_response = "Error generating response. Please try again later."
return render_template('prepme.html', job=job, initial_context=initial_context, initial_response=initial_response)
# Helper function to check allowed file extensions
def allowed_file(filename):
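    """Return True if the filename has a .csv or .xlsx extension."""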
return '.' in filename and filename.rsplit('.', 1)[1].lower() in {'csv', 'xlsx'}
if __name__ == '__main__':
    # Ensure DB tables exist (a safe no-op if they were already created)
    with app.app_context():
        db.create_all()
    # Bind to the port that HF Spaces exposes (defaults to 7860); os is imported at module level
    port = int(os.environ.get("PORT", 7860))
    app.run(host="0.0.0.0", port=port, debug=False)