# Recruit-Edge / app.py
# Mr-Thop's picture
# Update app.py
# e53f9a7 verified
# (The four lines above are Hugging Face page chrome captured with the file;
# commented out so the module parses.)
import asyncio
import json
import logging
import os
import pickle
import random
import shutil
import smtplib
import time
from datetime import datetime, timedelta
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
from io import BytesIO
from typing import List, Optional

import pandas as pd
import psycopg2
import pymysql
import pymysql.cursors
from fastapi import FastAPI, Form, File, UploadFile
from fastapi import BackgroundTasks
from fastapi import HTTPException
from fastapi.middleware.cors import CORSMiddleware
# FileResponse added: it is used by the /download endpoint but was never imported.
from fastapi.responses import JSONResponse, FileResponse
from flask import Flask, request, jsonify, send_from_directory
from flask_cors import CORS
from google import genai
from google.oauth2 import service_account
from googleapiclient.discovery import build
from langchain_core.documents import Document
from langchain_google_genai import ChatGoogleGenerativeAI, GoogleGenerativeAIEmbeddings
from langchain_postgres import PGVector
from pydantic import BaseModel
from pypdf import PdfReader
from sendgrid import SendGridAPIClient
from sendgrid.helpers.mail import Mail
from sentence_transformers import SentenceTransformer, util
from werkzeug.utils import secure_filename
#CONSTANTS
# Secrets pulled from the environment: two Gemini API keys, the Postgres
# connection string, and the three agent system prompts.
GOOGLE_API_KEY_P = os.getenv("GOOGLE_API_KEY_P")
GOOGLE_API_KEY_C = os.getenv("GOOGLE_API_KEY_C")
CONNECTION_STRING = os.getenv("CONNECTION_STRING")
COLLECTION_NAME_EMPLOYEE = 'TBH_resume'  # PGVector collection holding resume documents
COLLECTION_TEAM = 'TBH_Project'  # declared but not referenced elsewhere in this file
SYSTEM_PROMPT = os.getenv("SYSTEM_PROMPT")  # primes the candidate-evaluation chat ("model")
SYSTEM_PROMPT_R = os.getenv("SYSTEM_PROMPT_R")  # primes the resume-structuring chat ("chat_model")
SYSTEM = os.getenv("SYSTEM")  # primes the job-string extraction chat ("job_model")
# Google embedding model + PGVector store used to index and search resumes.
embeddings = GoogleGenerativeAIEmbeddings(google_api_key=GOOGLE_API_KEY_P, model="models/text-embedding-004")
db = PGVector(embeddings, connection=CONNECTION_STRING, collection_name=COLLECTION_NAME_EMPLOYEE, use_jsonb=True)
def conn():
    """Wipe the vector-store tables so each processing run starts clean.

    Deletes every row from the LangChain embedding/collection tables and the
    TBH_RESUME table, then commits. The connection is now always closed even
    if a DELETE fails (the original leaked the cursor and connection on error).
    """
    connection = psycopg2.connect(CONNECTION_STRING)
    try:
        # psycopg2 cursors are context managers: closed automatically.
        with connection.cursor() as cur:
            # Clear all rows from the collection
            cur.execute("DELETE FROM public.langchain_pg_embedding;")
            cur.execute("DELETE FROM public.langchain_pg_collection;")
            cur.execute('DELETE FROM public."TBH_RESUME";')
        connection.commit()
        print("Deleted")
    finally:
        connection.close()
# Adding documents (resumes) to vector database
def add_doc(documents):
    """Insert resume Documents into the PGVector store; failures are printed,
    never raised, so the pipeline can continue."""
    try:
        db.add_documents(documents)
    except Exception as exc:
        print(exc)
    else:
        print("Documents added Successfully")
# Async function to match Employees
async def employee_matching(job_skills, min_selection):
    """Return up to *min_selection* stored resumes most similar to *job_skills*.

    On any failure the traceback is logged and [] is returned so callers can
    proceed with zero matches instead of crashing.
    """
    try:
        return db.similarity_search(job_skills, k=min_selection)
    except Exception:
        # Bug fix: logging.exception already records the active traceback; the
        # original passed the exception as an extra positional argument, which
        # breaks the logging module's %-style message formatting.
        logging.exception("Error during employee matching")
        return []
# Initialise AI agent
def initialize_model(client, prompt):
    """Create a Gemini chat session and prime it with a system prompt.

    The priming response itself is not needed by callers; only the live chat
    session is returned.
    """
    session = client.chats.create(model="gemini-2.5-flash")
    session.send_message(prompt)
    return session
# Converting string to JSON safely
def jsond(text):
    """Extract and parse the outermost {...} span from an AI response string.

    Raises ValueError when the text is blank, contains no brace-delimited
    span, or the span is not valid JSON.
    """
    if not text.strip():
        raise ValueError("Empty response text received from the AI service.")
    opening = text.find("{")
    closing = text.rfind("}")
    if opening == -1 or closing == -1:
        raise ValueError(f"Response text does not contain valid JSON delimiters: {text}")
    try:
        return json.loads(text[opening:closing + 1])
    except Exception as e:
        raise ValueError(f"Error parsing JSON from response text: {e} | Raw response: {text}")
# Extracting Job Strings from the Prompt
def posting(chat, prompt):
    """Send a job-posting prompt and return the raw response once the model
    signals a final answer (JSON "type" == "output").

    A response carrying an "output" key but no "type" key is treated as final
    (fallback). Any other shape raises ValueError.
    """
    response = chat.send_message(prompt)
    raw_response = response.text
    print("Raw AI response from posting:", raw_response)
    try:
        parsed = jsond(raw_response)
    except Exception as err:
        print("Error parsing JSON:", err)
        raise
    # Fallback: If 'type' key is missing, check if 'output' exists directly.
    if "type" not in parsed and "output" in parsed:
        print("Fallback: 'type' key missing, but 'output' is present.")
        parsed["type"] = "output"
    if "type" not in parsed:
        print("DEBUG: Parsed JSON from AI response:", parsed)
        raise ValueError(f"Expected key 'type' not found in response: {parsed}")
    if parsed["type"].lower() != "output":
        print("DEBUG: Response 'type' is not output, received:", parsed.get("type"))
        raise ValueError(f"Unexpected 'type' value in response: {parsed}")
    return response
# Asynchronous chat functions
async def chats(chat, prompt):
    """Drive the agent until it emits a terminal message.

    Intermediate "conversation"/"plan" turns are fed back into the chat
    recursively; any other type (including "output") is returned as-is.
    """
    response = chat.send_message(prompt)
    parsed = jsond(response.text)
    if parsed["type"].lower() in ["conversation", "plan"]:
        return await chats(chat, response.text)
    return response
async def chats_r(chat, prompt):
    """Drive the resume-structuring agent until it reports a terminal message.

    The agent marks completion with either "state": "output" or
    "type": "output" in its JSON reply; non-terminal "type" replies are fed
    back to the chat until a terminal one arrives. Returns the parsed JSON
    dict of the terminal (or otherwise final) reply.
    """
    response = chat.send_message(prompt)
    clean_text = jsond(response.text)
    if "state" in clean_text:
        if clean_text["state"].lower() == "output":
            return clean_text
    elif "type" in clean_text:
        if clean_text["type"].lower() == "output":
            return clean_text
        # Bug fix: the original discarded the recursive result and sent the
        # raw dict (not JSON text) back to the model.
        return await chats_r(chat, json.dumps(clean_text))
    # Fall-through (e.g. non-"output" state): return the parsed reply, as the
    # original did via a redundant re-parse of response.text.
    return clean_text
# Helper to parse a PDF file from an uploaded file object
def parse_resume_file(file_obj, role, skillset):
    """Extract the text of every PDF page and wrap it in the input envelope
    expected by the resume-structuring agent."""
    reader = PdfReader(file_obj)
    pages = [page.extract_text() for page in reader.pages]
    return {
        "type": "input",
        "data": pages,
        "role": role,
        "Required Skills": skillset,
    }
# Asynchronous function to convert unstructured resume to structured format using file object
async def un_to_st_file(file_obj, role, skillset, chat):
    """Parse a PDF file object and ask the agent to return it structured."""
    raw = parse_resume_file(file_obj, role, skillset)
    return await chats_r(chat, json.dumps(raw))
# Initialising Clients
# Two Gemini clients with separate API keys so the resume-structuring chat
# does not share quota with the job-posting / evaluation chats.
client = genai.Client(api_key=GOOGLE_API_KEY_P)
client2 = genai.Client(api_key=GOOGLE_API_KEY_C)
# Chat sessions, each primed with its system prompt from the environment.
job_model = initialize_model(client, SYSTEM)  # extracts job strings from the prompt
chat_model = initialize_model(client2, SYSTEM_PROMPT_R)  # structures raw resume text
model = initialize_model(client, SYSTEM_PROMPT)  # evaluates matched candidates
# Async processing function which replicates your main logic
async def process_resumes_async(prompt: str, openings: int, resume_files: list):
    """End-to-end pipeline: wipe the vector store, extract the job spec from
    the prompt, structure and index every uploaded resume, then match and
    evaluate candidates. Returns a JSON-serializable result dict (or an
    error dict when no resumes could be indexed)."""
    conn()
    # JobString Extraction from Input
    posting_response = posting(job_model, prompt)
    job_spec = jsond(posting_response.text)["output"]
    job_role = str(job_spec["job_role"])
    job_skills = str(job_spec["job_skills"])
    description = str(job_spec["description"])
    # Use the uploaded resume files
    all_documents = []
    for file_obj in resume_files:
        print(f"Processing Resume: {file_obj.filename}")
        structured_resume = await un_to_st_file(BytesIO(await file_obj.read()), job_role, job_skills, chat_model)
        skills = structured_resume["Skills"]
        all_documents.append(
            Document(
                page_content=f"SkillSet: {skills}, Resume Data: {structured_resume}",
                metadata={"id": random.randint(100, 1000), "Role": job_role},
            )
        )
    if not all_documents:
        return {"error": "No valid resumes found"}
    # Adding parsed resumes to the vector database
    print("Adding resumes to database...")
    add_doc(all_documents)
    print("Required Skillset: ", job_skills, "\n")
    employees = await employee_matching(job_skills, openings)
    # Evaluating Matched Employees Data Using Agent
    candidates = []
    for employee in employees:
        verdict = await chats(model, json.dumps(employee.page_content))
        candidates.append(jsond(verdict.text))
    return {
        "job_role": job_role,
        "job_skills": job_skills,
        "description": description,
        "candidates": candidates,
    }
# Initialize FastAPI app
app = FastAPI()
# Enable CORS
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# rejected by browsers for credentialed requests; restrict origins if cookies
# or auth headers are ever needed.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # Allow all origins (you can modify this as needed)
    allow_credentials=True,
    allow_methods=["*"],  # Allow all methods (GET, POST, etc.)
    allow_headers=["*"],  # Allow all headers
)
@app.post('/process_resumes')
async def process_resumes(prompt: str = Form(...), openings: int = Form(1), resumes: list[UploadFile] = File(...)):
    """Endpoint: accept a hiring prompt plus resume PDFs, run the matching
    pipeline, and return the job spec with evaluated candidates.

    Returns 400 for a missing prompt/resumes, 500 (with details) when the
    pipeline raises, otherwise 200 with the pipeline's result dict.
    """
    print("=== Received a POST request to /process_resumes ===")
    print(f"Prompt received: {prompt}")
    print(f"Openings received: {openings}")
    print(f"Number of resume files received: {len(resumes)}")
    # Validate the openings parameter
    # NOTE(review): FastAPI already coerces `openings` to int, so this branch
    # is effectively dead — kept for defense in depth.
    if not isinstance(openings, int):
        print(f"Error converting openings to int: {openings}. Invalid number.")
        return JSONResponse(status_code=400, content={"error": "Invalid number of openings"})
    if not prompt or not resumes:
        print("Missing prompt or resume files. Aborting request.")
        return JSONResponse(status_code=400, content={"error": "Missing prompt or resume files"})
    # Process resumes asynchronously
    try:
        print("Starting asynchronous processing of resumes...")
        result = await process_resumes_async(prompt, openings, resumes)
        print("Asynchronous processing completed successfully.")
        print("Result:", json.dumps(result, indent=2))
    except Exception as e:
        print(f"Error during processing resumes: {e}")
        return JSONResponse(status_code=500, content={"error": "Internal server error", "details": str(e)})
    return JSONResponse(status_code=200, content=result)
def connect_db():
    """Open a MySQL connection (DictCursor rows) from the MYSQL_* env vars.

    Prints and re-raises on failure so callers see the real error.
    """
    try:
        return pymysql.connect(
            host=os.getenv("MYSQL_HOST"),
            user=os.getenv("MYSQL_USER"),
            password=os.getenv("MYSQL_PWD"),
            database=os.getenv("MYSQL_DB"),
            port=int(os.getenv("MYSQL_PORT")),
            cursorclass=pymysql.cursors.DictCursor,
        )
    except Exception as exc:
        print("Error connecting to database:", exc)
        raise
def update_employee_status(cursor, conn, employee_id, new_status='Busy'):
    """Set one employee's availability_status, committing on success and
    rolling back (with a log line) on any failure."""
    sql = "UPDATE employees SET availability_status = %s WHERE id = %s"
    try:
        cursor.execute(sql, (new_status, employee_id))
        conn.commit()
    except Exception as exc:
        print("Error updating status for employee id {}: {}".format(employee_id, exc))
        conn.rollback()
def clean_employee(emp):
    """Return a copy of *emp* with the non-serializable 'skills_embedding'
    tensor removed; falsy input (None / empty dict) yields None.

    The input dict is never mutated.
    """
    if not emp:
        return None
    sanitized = dict(emp)
    sanitized.pop('skills_embedding', None)
    return sanitized
def fetch_assignment_data(cursor, project_id):
    """Load everything assign_team needs in three queries.

    Returns (project_row, requirement_rows, employee_rows) where employees
    are joined with their skills, proficiency_level and years_experience.
    """
    # Fetch project by id
    cursor.execute("SELECT * FROM projects WHERE id = %s", (project_id,))
    project = cursor.fetchone()
    # Fetch project requirements for that project
    cursor.execute("SELECT * FROM project_requirements WHERE project_id = %s", (project_id,))
    requirements = cursor.fetchall()
    # Fetch all employees with skills, proficiency_level, and years_experience
    employee_sql = """
        SELECT e.id, e.name, e.role, e.availability_status, es.skills, es.proficiency_level, es.years_experience
        FROM employees e
        JOIN employee_skills es ON e.id = es.employee_id
    """
    cursor.execute(employee_sql)
    employees = cursor.fetchall()
    return project, requirements, employees
def filter_available(employees):
    """Keep only employees whose availability_status is 'available'
    (case-insensitive)."""
    available = []
    for emp in employees:
        if emp['availability_status'].lower() == 'available':
            available.append(emp)
    return available
def compute_embeddings(employees, model):
    """Attach a 'skills_embedding' tensor to every employee dict in place."""
    for record in employees:
        # Convert the comma-separated skills string into a single sentence
        record['skills_embedding'] = model.encode(record['skills'], convert_to_tensor=True)
def match_role_with_threshold(requirement_text, candidates, model, start_threshold, top_n=1, lower_bound=3):
    """
    Given a requirement text and a list of candidate employees,
    first filter candidates whose proficiency_level >= threshold.
    If no candidates found, lower the threshold iteratively until lower_bound.
    Then, select top_n candidates based on cosine similarity between
    the requirement embedding and the candidate's skills embedding.

    Bug fix: the fallback branch previously called candidates.sort(), which
    reordered the caller's shared candidate pool in place; sorted() keeps the
    input list untouched.
    """
    req_embedding = model.encode(requirement_text, convert_to_tensor=True)

    def candidate_similarity(emp):
        return util.pytorch_cos_sim(req_embedding, emp['skills_embedding']).item()

    for threshold in range(start_threshold, lower_bound - 1, -1):
        filtered = [emp for emp in candidates if emp['proficiency_level'] >= threshold]
        if filtered:
            return sorted(filtered, key=candidate_similarity, reverse=True)[:top_n]
    if candidates:
        return sorted(candidates, key=candidate_similarity, reverse=True)[:top_n]
    return []
def get_candidates_for_role(req, min_threshold, candidates, model, num_candidates=1):
    """Match a single requirement row against the pool; [] when the role is
    absent from the project's requirements."""
    if req is None:
        return []
    return match_role_with_threshold(
        req['required_skill'],
        candidates,
        model,
        start_threshold=min_threshold,
        top_n=num_candidates,
        lower_bound=3,
    )
@app.get('/api/projects')
async def get_projects():
    """Return every row of the projects table as JSON.

    Fix: the connection and cursor previously leaked when the query raised;
    they are now released in finally blocks before the 500 is raised.
    """
    try:
        conn = connect_db()
        try:
            cursor = conn.cursor()
            try:
                cursor.execute("SELECT * FROM projects")
                projects = cursor.fetchall()
            finally:
                cursor.close()
        finally:
            conn.close()
        # Return data as JSON response
        return JSONResponse(content=projects)
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Error fetching projects: {str(e)}")
# Pydantic model for validating incoming request data
# (POST /api/projects body: project name, free-text description, and the
# single required skill stored in project_requirements).
class ProjectCreate(BaseModel):
    name: str
    description: str
    required_skill: str
@app.post('/api/projects')
async def create_project(project: ProjectCreate):
    """Create a project row plus its single skill requirement.

    Bug fixes: MySQL via pymysql has no `RETURNING` clause, and with
    DictCursor `fetchone()[0]` would raise KeyError — the auto-increment id
    is now read from cursor.lastrowid. The connection is also released on
    failure (it previously leaked).
    """
    name = project.name
    description = project.description
    required_skill = project.required_skill
    try:
        # Connect to the database
        conn = connect_db()
        try:
            cursor = conn.cursor()
            # Insert into the projects table
            cursor.execute("INSERT INTO projects (name, description) VALUES (%s, %s)", (name, description))
            project_id = cursor.lastrowid  # auto-generated project ID
            # Insert into the project_requirements table
            cursor.execute("INSERT INTO project_requirements (project_id, required_skill) VALUES (%s, %s)",
                           (project_id, required_skill))
            conn.commit()
            cursor.close()
        finally:
            conn.close()
        # Return success response
        return {"message": "Project created successfully", "project_id": project_id}
    except Exception as e:
        # Handle errors and return an appropriate response
        raise HTTPException(status_code=500, detail=f"Error creating project: {str(e)}")
# Pydantic models for request and response validation
# NOTE(review): the employee rows built by assign_team carry 'skills'
# (plural) plus role/availability columns, while this model declares
# singular 'skill' — confirm the response_model validation actually accepts
# those dicts.
class Employee(BaseModel):
    id: int
    name: str
    skill: str
    proficiency_level: Optional[int] = None
# Response shape for POST /api/assign-team/{project_id}.
class ProjectAssignmentResponse(BaseModel):
    message: str
    assignments: dict  # Define the assignments as a dictionary where key is role and value is Employee or None
    # Make fields Optional so that None values are accepted
    frontend_developer: Optional[Employee] = None
    backend_developer: Optional[Employee] = None
    aiml_engineers: Optional[List[Employee]] = []
@app.post("/api/assign-team/{project_id}", response_model=ProjectAssignmentResponse)
async def assign_team(project_id: int):
    """Auto-assign a team (one frontend, one backend, up to three AI/ML
    engineers) to a project using sentence-embedding similarity between each
    requirement's skill text and each available employee's skills.

    Each pick is removed from the pool so no one fills two roles; picks are
    marked Busy and recorded in assigned_projects before the sanitized
    assignments are returned.

    NOTE(review): the DB connection/cursor are not closed if an exception
    fires before the cleanup lines near the end — consider try/finally.
    """
    try:
        conn = connect_db()
        cursor = conn.cursor()
        # Fetch project, requirements, and employees for the given project id
        project, requirements, employees = fetch_assignment_data(cursor, project_id)
        if not project:
            raise HTTPException(status_code=404, detail="Project not found")
        available_employees = filter_available(employees)
        # Load SentenceTransformer model and compute embeddings for available employees
        model = SentenceTransformer('all-MiniLM-L6-v2')
        compute_embeddings(available_employees, model)
        # Locate each role's requirement row by keyword match on its
        # required_skill text (first match wins; None when absent).
        frontend_req = next((req for req in requirements if any(kw.lower() in req['required_skill'].lower()
                            for kw in ['React', 'UI/UX', 'Frontend'])), None)
        backend_req = next((req for req in requirements if any(kw.lower() in req['required_skill'].lower()
                           for kw in ['Node.js', 'Express', 'Backend'])), None)
        aiml_req = next((req for req in requirements if any(kw.lower() in req['required_skill'].lower()
                        for kw in ['Machine Learning', 'TensorFlow', 'PyTorch', 'NLP'])), None)
        # Set starting thresholds; use default if not provided
        frontend_min = frontend_req['min_proficiency_level'] if frontend_req and frontend_req.get('min_proficiency_level') else 4
        backend_min = backend_req['min_proficiency_level'] if backend_req and backend_req.get('min_proficiency_level') else 4
        aiml_min = aiml_req['min_proficiency_level'] if aiml_req and aiml_req.get('min_proficiency_level') else 5
        team_assignments = {}
        # Frontend role
        frontend_candidates = get_candidates_for_role(frontend_req, frontend_min, available_employees, model, num_candidates=1)
        frontend_candidate = frontend_candidates[0] if frontend_candidates else None
        if frontend_candidate:
            team_assignments['frontend_developer'] = frontend_candidate
            # Remove the pick from the pool so later roles cannot reuse them.
            available_employees = [emp for emp in available_employees if emp['id'] != frontend_candidate['id']]
        else:
            team_assignments['frontend_developer'] = None
        # Backend role
        backend_candidates = get_candidates_for_role(backend_req, backend_min, available_employees, model, num_candidates=1)
        backend_candidate = backend_candidates[0] if backend_candidates else None
        if backend_candidate:
            team_assignments['backend_developer'] = backend_candidate
            available_employees = [emp for emp in available_employees if emp['id'] != backend_candidate['id']]
        else:
            team_assignments['backend_developer'] = None
        # AIML role: need 3 candidates
        aiml_candidates = match_role_with_threshold(aiml_req['required_skill'], available_employees, model, start_threshold=aiml_min, top_n=3, lower_bound=3) if aiml_req else []
        team_assignments['aiml_engineers'] = aiml_candidates
        for candidate in aiml_candidates:
            available_employees = [emp for emp in available_employees if emp['id'] != candidate['id']]
        # Update employee statuses and insert assignments into assigned_projects table
        assignments = []
        if team_assignments.get('frontend_developer'):
            emp = team_assignments['frontend_developer']
            update_employee_status(cursor, conn, emp['id'], 'Busy')
            assignments.append(('frontend', project_id, emp['id']))
        if team_assignments.get('backend_developer'):
            emp = team_assignments['backend_developer']
            update_employee_status(cursor, conn, emp['id'], 'Busy')
            assignments.append(('backend', project_id, emp['id']))
        for emp in team_assignments.get('aiml_engineers', []):
            update_employee_status(cursor, conn, emp['id'], 'Busy')
            assignments.append(('aiml', project_id, emp['id']))
        for role, proj_id, emp_id in assignments:
            cursor.execute(
                "INSERT INTO assigned_projects (project_id, employee_id, role, assigned_at) VALUES (%s, %s, %s, NOW())",
                (proj_id, emp_id, role)
            )
        conn.commit()
        cursor.close()
        conn.close()
        # Ensure all fields are explicitly included (even if None or empty)
        # clean_employee strips the tensor so the payload is JSON-serializable.
        response_assignments = {
            "frontend_developer": clean_employee(team_assignments.get('frontend_developer')) if team_assignments.get('frontend_developer') else None,
            "backend_developer": clean_employee(team_assignments.get('backend_developer')) if team_assignments.get('backend_developer') else None,
            "aiml_engineers": [clean_employee(emp) for emp in team_assignments.get('aiml_engineers', [])] if team_assignments.get('aiml_engineers') else []
        }
        return {"message": "Team assignments updated successfully", "assignments": response_assignments}
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Internal Server Error: {str(e)}")
# Configure logging for better debugging
logging.basicConfig(level=logging.INFO)
# Create a folder to store uploaded files
UPLOAD_FOLDER = 'uploads/'
os.makedirs(UPLOAD_FOLDER, exist_ok=True)
# Allowed file types for uploads
# Only candidate CSVs are accepted by /api/schedule (see allowed_file).
ALLOWED_EXTENSIONS = {'csv'}
def allowed_file(filename):
return '.' in filename and filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
# Function to add the interview event to the Google Calendar
def add_event_to_calendar(candidate_name, candidate_email, interview_date, interview_time, slot):
    """Insert a slot-length interview event on the primary Google Calendar.

    Credentials are unpickled from token.pkl; all failures (missing token,
    API errors) are logged and swallowed so email sending is never aborted.
    """
    try:
        SCOPES = ["https://www.googleapis.com/auth/calendar"]
        # Ensure the token file exists
        if not os.path.exists("token.pkl"):
            raise FileNotFoundError("Token file not found. Please authenticate first.")
        with open("token.pkl", "rb") as fh:
            creds = pickle.load(fh)
        calendar = build("calendar", "v3", credentials=creds)
        begins = datetime.strptime(f"{interview_date} {interview_time}", "%Y-%m-%d %H:%M")
        ends = begins + timedelta(minutes=slot)
        payload = {
            "summary": f"Interview with {candidate_name}",
            "location": "Virtual / Office",
            "description": f"Your interview at WeHack is scheduled.",
            "start": {"dateTime": begins.isoformat(), "timeZone": "Asia/Kolkata"},
            "end": {"dateTime": ends.isoformat(), "timeZone": "Asia/Kolkata"},
            "attendees": [{"email": candidate_email}],
            "reminders": {
                "useDefault": False,
                "overrides": [{"method": "email", "minutes": 30}, {"method": "popup", "minutes": 10}],
            },
        }
        created = calendar.events().insert(calendarId="primary", body=payload).execute()
        logging.info(f"โœ… Interview event created: {created.get('htmlLink')}")
    except Exception as exc:
        logging.error(f"Error creating calendar event for {candidate_name}: {exc}")
# Function to schedule interview slots
def schedule_slots(start, slot, break_time, candids):
    """Assign each candidate an interview start time inside 09:00-16:00 days.

    start      -- "YYYY-MM-DD %H:%M" anchor; days begin at 09:00
    slot       -- interview length in minutes
    break_time -- lunch-break length in minutes, starting 12:30
    candids    -- DataFrame of candidates; gains 'alloted' (slot START as
                  "YYYY-MM-DD HH:MM") and 'Day' columns

    Bug fix: the original stored each slot's END time in 'alloted', so emails
    and calendar events ran one slot late; the start time is recorded now.
    Returns the annotated DataFrame, or an empty DataFrame on error.

    NOTE(review): interview_end / lunch_start keep day-one dates, so the
    day-rollover and lunch comparisons misbehave once scheduling spills past
    the first day — pre-existing behavior, left unchanged here.
    """
    try:
        start_time = datetime.strptime(start, "%Y-%m-%d %H:%M")
        interview_start = start_time.replace(hour=9, minute=0)
        interview_end = start_time.replace(hour=16, minute=0)
        lunch_start = start_time.replace(hour=12, minute=30)
        lunch_end = lunch_start + timedelta(minutes=break_time)
        current_time = interview_start
        day_count = 1
        schedule = []
        for idx, row in candids.iterrows():
            if current_time >= interview_end:
                day_count += 1
                current_time = current_time.replace(hour=9, minute=0) + timedelta(days=1)
            if lunch_start <= current_time < lunch_end:
                current_time = lunch_end
            end_slot = current_time + timedelta(minutes=slot)
            if end_slot > interview_end:
                day_count += 1
                current_time = current_time.replace(hour=9, minute=0) + timedelta(days=1)
                end_slot = current_time + timedelta(minutes=slot)
            # Record the slot START (the original appended end_slot here).
            schedule.append(current_time.strftime("%Y-%m-%d %H:%M"))
            candids.at[idx, 'Day'] = day_count
            current_time = end_slot
        candids['alloted'] = schedule
        return candids
    except Exception as e:
        logging.error(f"Error scheduling slots: {e}")
        return pd.DataFrame()  # Return empty DataFrame in case of error
# Function to send interview emails
def send_emails(candidates, slot):
    """Email every scheduled candidate via SendGrid, then add a calendar event.

    candidates -- DataFrame with 'Name', 'Email' and 'alloted'
                  ("YYYY-MM-DD HH:MM") columns, as produced by schedule_slots
    slot       -- interview length in minutes, forwarded to the calendar event

    Requires the SendGrid_Secret and EMAIL env vars; returns early (with a
    log line) when either is missing. Per-recipient failures are logged and
    do not stop the loop.
    """
    SENDGRID_API_KEY = os.getenv("SendGrid_Secret")
    SENDER_EMAIL = os.getenv("EMAIL") # Verified sender email in SendGrid
    if not SENDGRID_API_KEY or not SENDER_EMAIL:
        logging.error("SendGrid API key or sender email not set in environment variables.")
        return
    sg_client = SendGridAPIClient(SENDGRID_API_KEY)
    print("Starting SendGrid email sending")
    for _, row in candidates.iterrows():
        recipient_email = row['Email']
        name = row['Name']
        # 'alloted' is "YYYY-MM-DD HH:MM": first 10 chars are the date,
        # the remainder (stripped) is the time.
        interview_date = row["alloted"][:10].strip()
        interview_time = row["alloted"][10:].strip()
        # Timestamp suffix keeps subjects unique so clients don't thread them.
        unique_id = datetime.now().strftime("%Y%m%d%H%M%S")
        subject = f"Your Interview at WeHack is Scheduled ๐Ÿš€ [Ref: {unique_id}]"
        # Customize the email HTML content as needed
        body = f"""
        <html>
        <body>
        <p>Dear {name},</p>
        <p>We are pleased to inform you that, following a review of your application,
        you have been selected to proceed to the next stage of the hiring process at <b>WeHack</b>.</p>
        <p><b>Date:</b> {interview_date}<br>
        <b>Time:</b> {interview_time}<br>
        <b>Mode:</b> Will be contacted further</p>
        <p>This interview presents an excellent opportunity for us to further explore your qualifications,
        experiences, and interest in the position. We are eager to learn more about you and how you envision contributing to our team.</p>
        <p>Please ensure your availability at the scheduled time. Should you have any questions, need to reschedule,
        or require assistance, do not hesitate to contact us.</p>
        <p>We look forward to our conversation and appreciate your continued interest in joining <b>WeHack</b>.</p>
        <p>&nbsp;</p> <!-- Invisible space to prevent auto-quoting -->
        <p>Kind regards,<br>
        HR<br>
        WeHack Organization<br>
        [Contact Information]</p>
        </body>
        </html>
        """
        message = Mail(
            from_email=SENDER_EMAIL,
            to_emails=recipient_email,
            subject=subject,
            html_content=body
        )
        try:
            response = sg_client.send(message)
            if 200 <= response.status_code < 300:
                print(f"โœ… Email sent to {name} ({recipient_email})")
                # Add event to calendar only after successful email
                add_event_to_calendar(name, recipient_email, interview_date, interview_time, slot)
            else:
                logging.error(f"Failed to send email to {recipient_email}, status code: {response.status_code}")
        except Exception as e:
            logging.error(f"Exception while sending email to {recipient_email}: {e}")
        time.sleep(2) # avoid rate limiting
    print("โœ… All emails have been sent successfully!")
@app.get("/download/{filename}")
async def download_file(filename: str):
    """Serve a previously uploaded file from UPLOAD_FOLDER as a download.

    Bug fixes: the route template was mangled ("(unknown)") so the endpoint
    could never match a real filename — restored to "/download/{filename}";
    FileResponse is now imported (see the imports block). The filename is
    passed through secure_filename to block path traversal, since this
    endpoint serves untrusted input.
    """
    safe_name = secure_filename(filename)
    file_path = os.path.join(UPLOAD_FOLDER, safe_name)
    # Check if file exists (and that sanitization left a usable name)
    if not safe_name or not os.path.exists(file_path):
        raise HTTPException(status_code=404, detail="File not found")
    # Return the file as a download
    return FileResponse(file_path, media_type='application/octet-stream', filename=safe_name)
@app.post("/api/schedule")
async def schedule_interviews(start_date: str = Form(...),
                              slot_length: int = Form(...),
                              break_time: int = Form(...),
                              file: UploadFile = File(...)):
    """Upload a candidate CSV, build interview slots, email each candidate,
    and return a download link for the saved CSV.

    Bug fixes: the download URL's path parameter was mangled to "(unknown)"
    and now interpolates the saved filename; the 400 HTTPException for a bad
    file type was previously swallowed by the broad except and re-raised as
    a 500 — it is now propagated unchanged.
    """
    print("scheduling point hit")
    try:
        # Check if file is allowed
        if not allowed_file(file.filename):
            raise HTTPException(status_code=400, detail="Invalid file format or no file provided")
        filename = secure_filename(file.filename)
        file_path = os.path.join(UPLOAD_FOLDER, filename)
        # Save the file
        with open(file_path, "wb") as f:
            f.write(await file.read())
        # Read candidates from the uploaded CSV file
        candidates = pd.read_csv(file_path)
        # Schedule interviews
        print("Calling Slots")
        scheduled_candidates = schedule_slots(start_date, slot_length, break_time, candidates)
        # Send emails (if required)
        print("Sending Emails")
        send_emails(scheduled_candidates, slot_length)
        # Return the URL for downloading the file
        download_url = f'https://mr-thop-recruit-edge.hf.space/download/{filename}'
        return JSONResponse(content={
            'message': 'Interviews scheduled successfully!',
            'file_url': download_url
        })
    except HTTPException:
        # Preserve deliberate client-error responses (e.g. the 400 above).
        raise
    except Exception as e:
        logging.error(f"Error scheduling interviews: {e}")
        raise HTTPException(status_code=500, detail="Failed to schedule interviews")
@app.get("/")
async def home():
    """Health-check root endpoint."""
    return "Recruit Edge Backend is Running!"
if __name__== "__main__":
    # NOTE(review): FastAPI apps have no .run() method — executing this file
    # directly would raise AttributeError (app.run is Flask's API). In the
    # hosted Space the app is served by an external ASGI runner, so this
    # branch normally never executes; switching to uvicorn.run(app) would
    # make local runs work — confirm before changing.
    print("Starting FAST app in debug mode...")
    app.run(debug=True)