# Source: Hugging Face Space by Vizznu19 — commit "Create app.py" (303a7be, verified)
# app.py
import os
import gradio as gr
from llama_index.readers.file import PDFReader
from llama_index.core import VectorStoreIndex
from llama_index.llms.openai import OpenAI
from llama_index.core.memory import ChatMemoryBuffer
from llama_index.core.chat_engine import ContextChatEngine
from llama_index.core.llms import ChatMessage, MessageRole
# --- Use HF secret for OpenAI API key ---
# The key is injected as a Space secret; fail fast at startup if it is missing
# so the problem surfaces immediately instead of on the first LLM call.
OPENAI_API_KEY = os.environ.get("OPENAI_API_KEY")
if not OPENAI_API_KEY:
    raise EnvironmentError("OPENAI_API_KEY is not set in environment variables.")
os.environ["OPENAI_API_KEY"] = OPENAI_API_KEY

# --- Global Chat Engine ---
# chat_engine is None until upload_and_index() has built an index;
# resume_text holds an excerpt of the uploaded resume for follow-up prompts.
chat_engine = None
resume_text = ""
# --- Follow-up Generator ---
def generate_followups(user_question, resume_excerpt):
    """Suggest 1-2 intelligent follow-up questions for the user.

    Args:
        user_question: The question the user just asked.
        resume_excerpt: Resume text used to ground the suggestions.

    Returns:
        A formatted suggestion string, or a fallback message if the LLM
        call fails for any reason.
    """
    llm = OpenAI(model="gpt-3.5-turbo")
    prompt = (
        f"The user asked: '{user_question}'\n\n"
        f"Based on their resume content:\n{resume_excerpt}\n\n"
        "Suggest 1–2 intelligent follow-up questions they might ask next. "
        "Format it like:\n\nπŸ” Follow-up Suggestions:\n- ...\n- ..."
    )
    try:
        response = llm.complete(prompt)
        return response.text.strip()
    except Exception:
        # Best-effort: a follow-up failure must never break the main reply.
        return "πŸ” Follow-up Suggestions:\n- Unable to generate follow-ups at this time."
# --- Upload & Index Resume ---
def upload_and_index(file):
    """Parse the uploaded PDF, build a vector index, and create the chat engine.

    Side effects: sets the module-level ``chat_engine`` and ``resume_text``.

    Args:
        file: Gradio file object; ``file.name`` is the local path of the upload.

    Returns:
        A status string describing success or the indexing error.
    """
    global chat_engine, resume_text
    try:
        reader = PDFReader()
        documents = reader.load_data(file=file.name)
        # Keep only the first document's text as the excerpt fed to the
        # follow-up generator (keeps that prompt short).
        resume_text = "\n".join(doc.text for doc in documents[:1])
        index = VectorStoreIndex.from_documents(documents)
        retriever = index.as_retriever(similarity_top_k=3)
        # Bounded memory so long conversations don't overflow the context window.
        memory = ChatMemoryBuffer.from_defaults(token_limit=3000)
        llm = OpenAI(model="gpt-3.5-turbo")
        system_prompt = ChatMessage(
            role=MessageRole.SYSTEM,
            content=(
                "You are a professional resume assistant. Your goal is to help users understand their resume better and explore relevant opportunities. "
                "Use the uploaded resume and past questions to craft structured, formal answers. Include bullet points and use headings like Skills, Experience, or Education where appropriate."
            )
        )
        chat_engine = ContextChatEngine.from_defaults(
            retriever=retriever,
            memory=memory,
            llm=llm,
            system_prompt=system_prompt
        )
        return "βœ… Resume indexed successfully. You can now chat with the assistant."
    except Exception as e:
        # Surface the error in the UI status box instead of crashing the app.
        return f"❌ Error during indexing: {str(e)}"
# --- Chat Function ---
def chat_with_bot(message, history):
    """Answer a chat message using the indexed resume, plus follow-up ideas.

    Args:
        message: The user's current question.
        history: Prior chat turns supplied by gr.ChatInterface (unused here;
            the chat engine keeps its own memory buffer).

    Returns:
        The assistant's answer with appended follow-up suggestions, or an
        error string if no resume has been indexed or the call fails.
    """
    if chat_engine is None:
        return "❌ Please upload and index your resume first."
    try:
        main_response = chat_engine.chat(message)
        followup = generate_followups(message, resume_text)
        return f"{str(main_response)}\n\n{followup}"
    except Exception as e:
        return f"❌ Error: {str(e)}"
# --- UI ---
# Gradio layout: an upload/index panel on top, a chat interface below.
with gr.Blocks() as iface:
    # Markdown content kept at column 0: indenting it would change how the
    # HTML/markdown renders.
    gr.Markdown("""
<h1 style='text-align: center;'>Revue</h1>
<p style='text-align: center; color: gray;'>Upload your resume and ask career-related questions. The assistant remembers context and suggests what to ask next.</p>
<hr>
""")
    with gr.Group():
        file_input = gr.File(label="πŸ“Ž Upload Resume (PDF)", file_types=[".pdf"])
        upload_btn = gr.Button("πŸ“₯ Upload & Index")
        status_box = gr.Textbox(label="Status", interactive=False)
    # Wire the button: index the uploaded PDF and report status.
    upload_btn.click(fn=upload_and_index, inputs=file_input, outputs=status_box)

    gr.Markdown("### πŸ’¬ Ask Questions About Your Resume")
    chatbot = gr.ChatInterface(
        fn=chat_with_bot,
        chatbot=gr.Chatbot(),
        textbox=gr.Textbox(placeholder="e.g. What are my key strengths for data science?", lines=1),
    )

iface.launch()