# resumebot / app.py — uploaded by ogflash (commit 7a23dbb, verified)
import os
from typing import Optional

import gradio as gr
from llama_index.core import VectorStoreIndex
from llama_index.core.chat_engine.types import BaseChatEngine
from llama_index.readers.file import PDFReader
# Configure the OpenAI API key from the environment (set this in Hugging Face
# Secrets). Guard against a missing key: assigning None into os.environ raises
# TypeError (environ values must be strings), so only write it back when set.
_openai_key = os.getenv("OPENAI_API_KEY")
if _openai_key:
    os.environ["OPENAI_API_KEY"] = _openai_key

# Module-level chat engine; populated by process_resume() after a PDF is indexed.
# None until a resume has been uploaded and processed.
chat_engine: Optional[BaseChatEngine] = None
def process_resume(file):
    """Index an uploaded resume PDF and build the global chat engine.

    Parameters
    ----------
    file
        The upload delivered by the Gradio ``File`` component, or ``None``
        when nothing was uploaded.

    Returns
    -------
    str
        A human-readable status message shown in the Status textbox.
    """
    global chat_engine
    if file is None:
        return "⚠️ Please upload a PDF file."
    try:
        documents = PDFReader().load_data(file=file)
        index = VectorStoreIndex.from_documents(documents)
        chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=False)
    except Exception as exc:
        # Surface PDF-parse / embedding failures as a status message instead of
        # an unhandled stack trace in the UI, and drop any stale engine so
        # chat_with_resume asks for a fresh upload.
        chat_engine = None
        return f"⚠️ Failed to process the PDF: {exc}"
    return "✅ Resume uploaded and indexed! You can now ask questions."
def chat_with_resume(message, chat_history):
    """Answer a question against the indexed resume.

    Parameters
    ----------
    message : str
        The user's question from the textbox.
    chat_history : list[dict]
        Messages-format history (``{"role": ..., "content": ...}`` dicts)
        as used by ``gr.Chatbot(type="messages")``.

    Returns
    -------
    tuple[str, list[dict]]
        ``("", updated_history)`` — the empty string clears the input
        textbox; the list is the updated chat transcript.
    """
    global chat_engine
    if not chat_engine:
        # The original returned the warning as the first output, which placed
        # it in the *input* textbox (outputs=[message, chatbot]). Show it in
        # the chat transcript instead and clear the textbox.
        chat_history.append({"role": "assistant", "content": "⚠️ Please upload a resume first."})
        return "", chat_history
    if not message or not message.strip():
        # Ignore empty submissions rather than querying the engine.
        return "", chat_history
    response = chat_engine.chat(message)
    chat_history.append({"role": "user", "content": message})
    chat_history.append({"role": "assistant", "content": response.response})
    return "", chat_history
# ---- Gradio UI ----
with gr.Blocks() as demo:
    gr.Markdown(
        "# 📄 Resume Chatbot\nUpload your resume and ask questions about your experience, skills, and more."
    )

    # Upload section: file picker plus a button that triggers indexing, with a
    # status textbox reporting the outcome.
    with gr.Row():
        file_input = gr.File(label="Upload Resume (PDF)", file_types=[".pdf"])
        upload_button = gr.Button("Process Resume")
    upload_output = gr.Textbox(label="Status")
    upload_button.click(fn=process_resume, inputs=file_input, outputs=upload_output)

    # Chat section: messages-format transcript, question box, and send button.
    chatbot = gr.Chatbot(label="Chat with Resume", type="messages")
    message = gr.Textbox(placeholder="Ask something like: What are my key skills?", label="Your Question")
    send = gr.Button("Send")
    send.click(chat_with_resume, inputs=[message, chatbot], outputs=[message, chatbot])

demo.launch()