ogflash commited on
Commit
7a23dbb
·
verified ·
1 Parent(s): a2839a0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +7 -4
app.py CHANGED
@@ -4,7 +4,7 @@ from llama_index.readers.file import PDFReader
4
  from llama_index.core import VectorStoreIndex
5
  from llama_index.core.chat_engine.types import BaseChatEngine
6
 
7
- # Set your OpenAI API key here (or via Hugging Face Secrets)
8
  os.environ['OPENAI_API_KEY'] = os.getenv("OPENAI_API_KEY")
9
 
10
  # Globals
@@ -13,8 +13,10 @@ chat_engine: BaseChatEngine = None
13
  # Function to process uploaded resume
14
  def process_resume(file):
15
  global chat_engine
 
 
16
  reader = PDFReader()
17
- documents = reader.load_data(file=file.name)
18
  index = VectorStoreIndex.from_documents(documents)
19
  chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=False)
20
  return "✅ Resume uploaded and indexed! You can now ask questions."
@@ -25,7 +27,8 @@ def chat_with_resume(message, chat_history):
25
  if not chat_engine:
26
  return "⚠️ Please upload a resume first.", chat_history
27
  response = chat_engine.chat(message)
28
- chat_history.append((message, response.response))
 
29
  return "", chat_history
30
 
31
  # Gradio UI
@@ -40,7 +43,7 @@ with gr.Blocks() as demo:
40
 
41
  upload_button.click(fn=process_resume, inputs=file_input, outputs=upload_output)
42
 
43
- chatbot = gr.Chatbot(label="Chat with Resume")
44
  message = gr.Textbox(placeholder="Ask something like: What are my key skills?", label="Your Question")
45
  send = gr.Button("Send")
46
 
 
4
  from llama_index.core import VectorStoreIndex
5
  from llama_index.core.chat_engine.types import BaseChatEngine
6
 
7
# Set your OpenAI API key from environment (set this in Hugging Face Secrets)
# NOTE: assigning os.getenv(...) straight into os.environ raises TypeError
# when the variable is unset (environ values must be str) — guard None so
# the app still starts and fails later with a clearer OpenAI auth error.
_openai_key = os.getenv("OPENAI_API_KEY")
if _openai_key:
    os.environ["OPENAI_API_KEY"] = _openai_key
9
 
10
  # Globals
 
13
# Function to process uploaded resume
def process_resume(file):
    """Index an uploaded resume PDF and (re)build the global chat engine.

    Parameters:
        file: value from the Gradio File input — a filesystem path (str)
            or a tempfile-like object exposing a ``.name`` path, depending
            on the Gradio version. ``None`` when nothing was uploaded.

    Returns:
        A status string shown in the upload-output textbox.
    """
    global chat_engine
    if file is None:
        return "⚠️ Please upload a PDF file."
    # Gradio may hand us a plain path or a file wrapper; accept both.
    path = file if isinstance(file, str) else getattr(file, "name", file)
    reader = PDFReader()
    try:
        documents = reader.load_data(file=path)
    except Exception as exc:
        # Surface parse/read failures as a UI message instead of a traceback.
        return f"⚠️ Could not read the PDF: {exc}"
    index = VectorStoreIndex.from_documents(documents)
    chat_engine = index.as_chat_engine(chat_mode="condense_question", verbose=False)
    return "✅ Resume uploaded and indexed! You can now ask questions."
 
27
def chat_with_resume(message, chat_history):
    """Answer one user question against the indexed resume.

    Appends the user turn and the assistant reply to *chat_history*
    (messages-format dicts) and clears the input textbox.
    """
    if not chat_engine:
        return "⚠️ Please upload a resume first.", chat_history
    reply = chat_engine.chat(message)
    chat_history.extend(
        [
            {"role": "user", "content": message},
            {"role": "assistant", "content": reply.response},
        ]
    )
    return "", chat_history
33
 
34
  # Gradio UI
 
43
 
44
  upload_button.click(fn=process_resume, inputs=file_input, outputs=upload_output)
45
 
46
+ chatbot = gr.Chatbot(label="Chat with Resume", type="messages")
47
  message = gr.Textbox(placeholder="Ask something like: What are my key skills?", label="Your Question")
48
  send = gr.Button("Send")
49