# Hugging Face Space "app.py" by Sazzz02 — commit 35ea9d6 (verified).
# (Header lines below were copy-paste residue from the HF file viewer,
# converted to comments so the module parses.)
import gradio as gr
import requests
import os
import PyPDF2
from io import BytesIO
# Hugging Face API and model
HF_API_TOKEN = os.getenv("HF_API_TOKEN")  # read from Space secrets; None when unset (checked in check_api_ready)
HF_API_URL = "https://api-inference.huggingface.co/models/HuggingFaceH4/zephyr-7b-beta"  # hosted Inference API endpoint for the Zephyr-7B chat model
MODEL_CHECK_PROMPT = "Say hello!"  # trivial prompt used only to probe that the endpoint answers
# βœ… Check whether the API key and model are working correctly
def check_api_ready():
    """Verify the HF token is configured and the inference endpoint responds.

    Sends MODEL_CHECK_PROMPT to the model and inspects the reply.

    Returns:
        tuple[bool, str]: (is_ready, human-readable status message).
        Never raises; network errors are folded into the message.
    """
    if not HF_API_TOKEN:
        return False, "❌ HF_API_TOKEN not set. Please add in Space secrets."
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
    try:
        resp = requests.post(
            HF_API_URL,
            headers=headers,
            json={"inputs": MODEL_CHECK_PROMPT},
            timeout=20,
        )
        if resp.status_code == 200:
            data = resp.json()
            # The hosted API reports model errors as {"error": ...} even on 200.
            if isinstance(data, dict) and "error" in data:
                return False, f"❌ Model Error: {data['error']}"
            # Guard against an empty list before indexing (original could IndexError).
            if isinstance(data, list) and data and data[0].get("generated_text"):
                return True, "βœ… Model is ready!"
            # BUG FIX: the original fell through here and returned None,
            # which crashed the caller's tuple unpacking.
            return False, f"❌ Unexpected API response: {resp.text}"
        if resp.status_code == 401:
            return False, "❌ Unauthorized. Check your API token."
        return False, f"❌ Unexpected API response: {resp.text}"
    except Exception as e:
        return False, f"❌ API connection failed: {str(e)}"
# βœ… Tooltip animation HTML from Lottie JSON (place Robotics-Students.json in Space root)
def lottie_html():
    """Return the HTML snippet that embeds the Lottie robot animation.

    Loads the lottie-player web component from unpkg and points it at
    Robotics-Students.json, which must sit in the Space root.
    """
    return """
<script src="https://unpkg.com/@lottiefiles/lottie-player@latest/dist/lottie-player.js"></script>
<lottie-player src='Robotics-Students.json' background='transparent' speed='1' style='width:340px; height:340px; margin-bottom:12px;' loop autoplay></lottie-player>
"""
# βœ… PDF text extractor
def extract_text_from_pdf(pdf_file):
    """Extract the plain text of every page of an uploaded PDF.

    Pages with no extractable text contribute an empty segment; the joined
    result is stripped, so a text-free PDF yields "".
    """
    document = PyPDF2.PdfReader(BytesIO(pdf_file.read()))
    page_texts = (page.extract_text() or "" for page in document.pages)
    return "\n".join(page_texts).strip()
# βœ… Generate questions and answers using language model
def ai_generate_questions(resume_text, job_title):
    """Ask the hosted model for 10 interview questions with coaching tips.

    Args:
        resume_text: plain text extracted from the candidate's resume.
        job_title: the target role to tailor questions to.

    Returns:
        str: the model's generated text, or a red-styled HTML error <div>.
        Never raises; all failures are folded into the returned HTML.
    """
    prompt = (
        f"You are an AI interview coach.\n"
        f"Candidate Resume:\n{resume_text}\n"
        f"Target Role: {job_title}\n"
        f"Generate 10 realistic interview questions based on this person’s resume, "
        f"and for each question provide a coaching tip to help them answer effectively."
    )
    headers = {"Authorization": f"Bearer {HF_API_TOKEN}"}
    payload = {"inputs": prompt, "parameters": {"max_new_tokens": 512, "temperature": 0.7}}
    try:
        response = requests.post(HF_API_URL, headers=headers, json=payload, timeout=60)
        data = response.json()
        # FIX: the API normally returns a list; the original `"error" in data`
        # did list membership there. Match check_api_ready's dict guard.
        if isinstance(data, dict) and "error" in data:
            return f"<div style='color:red;'>❌ API Error: {data['error']}</div>"
        # Malformed payloads (empty list, missing key) raise here and are
        # reported via the except branch below.
        return data[0]["generated_text"].strip()
    except Exception as e:
        return f"<div style='color:red;'>❌ Exception: {str(e)}</div>"
# βœ… Format final output HTML
def render_output(name, job, status, queue, messages, model_message, show_lottie=True):
    """Compose the full two-column result page as an HTML string.

    Left column: status panel (optionally topped by the Lottie animation)
    showing candidate name, target position, queue counter and an optional
    green model-status line. Right column: the caller-supplied `messages` HTML.
    """
    animation = lottie_html() if show_lottie else ""
    if model_message:
        model_note = f"<div style='margin-top:12px;font-size:0.9rem;color:#46a546;'>{model_message}</div>"
    else:
        model_note = ""
    panel = f"""
{animation}
<div style='background:#f4f6fa;padding:18px;border-radius:16px;
max-width:340px;box-shadow:0 2px 14px #ccd4e6b3;font-family:sans-serif;'>
<div style='font-size:1.25rem;font-weight:700;'>πŸ€– ROBOT RESUME ANALYZER</div>
<div style='margin-top:10px;'><b>Status:</b> <span style='color:#058c42'>{status}</span></div>
<div style='margin-top:10px;'><b>Current:</b> {name}</div>
<div><b>Position:</b> {job}</div>
<div style='margin-top:10px;'><b>Analysis Progress:</b></div>
<div style='font-size:2rem;'>πŸ‘₯</div>
<div><b>QUEUE</b></div>
<div style='font-size:1.5rem;font-weight:700;'>{queue}</div>
<div><i>Analyzing Next...</i></div>
{model_note}
</div>
"""
    return f"""
<div style='display:flex;gap:30px;'>
{panel}
<div style='max-width:620px;margin-top:15px;'>{messages}</div>
</div>
"""
# βœ… Main app logic with staged UI updates
def interface(pdf_file, job_title):
    """Generator driving the UI: validate inputs, then stream progress and results.

    Yields HTML strings for the gr.HTML output. Because this function contains
    `yield`, it is a generator — the original code used `return render_output(...)`
    for every validation/error path, which in a generator becomes a StopIteration
    value that Gradio never renders, so users saw nothing on error. FIX: every
    path now `yield`s its HTML, then bare-`return`s to stop the stream.

    NOTE(review): gr.File is configured with type="binary", which would hand this
    function raw bytes, yet the code calls pdf_file.name/.seek()/.read() as on a
    file object — confirm against the Gradio version actually deployed.
    """
    is_ready, msg = check_api_ready()
    if not is_ready:
        err_html = f"<div style='color:red;'><b>ERROR:</b> {msg}</div>"
        yield render_output("---", "---", "Unavailable", 0, err_html, msg, show_lottie=False)
        return
    if not pdf_file:
        yield render_output("---", "---", "Waiting for PDF", 1, "<b>Please upload a PDF resume.</b>", msg)
        return
    if not job_title.strip():
        name = pdf_file.name.replace('.pdf', '')
        yield render_output(name, "---", "Waiting for Job Title", 1, "<b>Please enter a target job title.</b>", msg)
        return
    name = pdf_file.name.replace(".pdf", "")
    try:
        pdf_file.seek(0)  # rewind in case the upload handle was already read
        resume_text = extract_text_from_pdf(pdf_file)
        if not resume_text:
            raise Exception("PDF contains no readable text")
    except Exception as e:
        yield render_output(name, job_title, "PDF Error", 1, f"<div style='color:red;'>Failed to extract PDF: {e}</div>", msg)
        return
    # Intermediate frame so the user sees progress while the model runs.
    yield render_output(name, job_title, "Analyzing...", 1, "<i>Generating questions with AI...</i>", msg)
    ai_output = ai_generate_questions(resume_text, job_title)
    questions = [q.strip() for q in ai_output.split("\n") if q.strip()]
    html = "<h3>βœ… Interview Questions & Coaching Tips</h3>"
    for i, q in enumerate(questions[:10], 1):
        html += f"<div style='background:#fff;padding:16px;border-radius:10px;margin-bottom:12px;box-shadow:0 2px 6px #ddd;'>"
        html += f"<b>Q{i}:</b> {q}</div>"
    yield render_output(name, job_title, "Done βœ…", 0, html, msg)
# βœ… Launch Gradio app
# βœ… Launch Gradio app.
# FIX: the original used `with gr.Interface(...) as demo: demo.launch()` —
# calling launch() while the Blocks context is still open is not the documented
# pattern and can misconfigure the app; gr.Interface is meant to be assigned
# directly and launched afterwards, guarded so imports don't start a server.
demo = gr.Interface(
    fn=interface,  # generator fn: Gradio streams each yielded HTML frame
    inputs=[
        gr.File(label="Upload PDF Resume", type="binary", file_types=[".pdf"]),
        gr.Textbox(label="Target Job Title", placeholder="e.g. Data Analyst"),
    ],
    outputs=gr.HTML(),
    allow_flagging="never",
    title="πŸ€– AI Resume Analyzer + Interview Coach (PDF + Lottie)",
    live=False,
)

if __name__ == "__main__":
    demo.launch()