AnnaMathews committed on
Commit
d41636e
·
verified ·
1 Parent(s): 622f093

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -12
app.py CHANGED
@@ -3,18 +3,17 @@ import gradio as gr
3
  from llama_index.readers.file import PDFReader
4
  from llama_index.core import VectorStoreIndex
5
  from llama_index.llms.openai import OpenAI as LlamaOpenAI
6
- from openai import OpenAI
7
- import openai
8
  from gtts import gTTS
9
  import random
10
  import tempfile
11
  import speech_recognition as sr
12
 
13
- # Set API key (use HF secret if available)
14
- openai.api_key = os.environ.get('HF_OPENAI_API_KEY', '[REDACTED-API-KEY]')  # SECURITY: a live secret key was committed here — it must be revoked; never hardcode keys
15
- client = OpenAI() # OpenAI SDK
16
 
17
- # Globals
18
  query_engine = None
19
  resume_summary = ""
20
  questions = []
@@ -100,22 +99,22 @@ Only return: Score: <number>
100
  except Exception as e:
101
  return "❌ Evaluation failed", str(e)
102
 
103
- # Gradio UI
104
  with gr.Blocks() as demo:
105
- gr.Markdown("# 🎀 Voice-Based Resume Interview Bot\nUpload your resume, get voice questions, answer by mic, and receive feedback.")
106
 
107
  with gr.Row():
108
  resume_file = gr.File(label="📄 Upload Resume", file_types=[".pdf"])
109
  resume_summary_box = gr.Textbox(label="Resume Summary", lines=6)
110
  gr.Button("Analyze Resume").click(fn=load_resume, inputs=resume_file, outputs=resume_summary_box)
111
 
112
- gr.Markdown("## 🔊 Ask Next Resume-Based Question")
113
  question_text = gr.Textbox(label="Question", lines=2)
114
- question_audio = gr.Audio(label="Listen", type="filepath", autoplay=True)
115
  gr.Button("Ask Question").click(fn=ask_next_question, outputs=[question_text, question_audio])
116
 
117
- gr.Markdown("## 🎙️ Answer the Question with Your Voice")
118
- user_audio = gr.Audio(label="🎀 Record Answer", type="filepath")
119
  transcript_output = gr.Textbox(label="Transcript")
120
  score_output = gr.Textbox(label="LLM Score")
121
  gr.Button("Submit Answer").click(fn=evaluate_answer, inputs=user_audio, outputs=[transcript_output, score_output])
 
3
  from llama_index.readers.file import PDFReader
4
  from llama_index.core import VectorStoreIndex
5
  from llama_index.llms.openai import OpenAI as LlamaOpenAI
6
+ from openai import OpenAI # New SDK
 
7
  from gtts import gTTS
8
  import random
9
  import tempfile
10
  import speech_recognition as sr
11
 
12
+ # πŸ” Set API key directly (use HF secret or hardcode for testing)
13
+ OPENAI_API_KEY = os.environ.get('HF_OPENAI_API_KEY', '[REDACTED-API-KEY]')  # SECURITY: a live secret key was committed here — it must be revoked; use the HF secret
14
+ client = OpenAI(api_key=OPENAI_API_KEY)
15
 
16
+ # Global state
17
  query_engine = None
18
  resume_summary = ""
19
  questions = []
 
99
  except Exception as e:
100
  return "❌ Evaluation failed", str(e)
101
 
102
+ # Gradio Interface
103
  with gr.Blocks() as demo:
104
+ gr.Markdown("# 🎀 Voice Resume Interview Bot\nUpload your resume β†’ get questions in voice β†’ answer via mic β†’ get score.")
105
 
106
  with gr.Row():
107
  resume_file = gr.File(label="📄 Upload Resume", file_types=[".pdf"])
108
  resume_summary_box = gr.Textbox(label="Resume Summary", lines=6)
109
  gr.Button("Analyze Resume").click(fn=load_resume, inputs=resume_file, outputs=resume_summary_box)
110
 
111
+ gr.Markdown("## 🔊 Ask a Resume-Based Question")
112
  question_text = gr.Textbox(label="Question", lines=2)
113
+ question_audio = gr.Audio(label="Voice", type="filepath", autoplay=True)
114
  gr.Button("Ask Question").click(fn=ask_next_question, outputs=[question_text, question_audio])
115
 
116
+ gr.Markdown("## 🎙️ Speak Your Answer")
117
+ user_audio = gr.Audio(label="Record Your Answer", type="filepath")
118
  transcript_output = gr.Textbox(label="Transcript")
119
  score_output = gr.Textbox(label="LLM Score")
120
  gr.Button("Submit Answer").click(fn=evaluate_answer, inputs=user_audio, outputs=[transcript_output, score_output])