Summeya committed on
Commit
7226ced
·
verified ·
1 Parent(s): b22d596

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +10 -18
app.py CHANGED
@@ -4,21 +4,20 @@ import gradio as gr
4
  import openai
5
  from prompts import SYSTEM_PROMPT
6
 
7
- # πŸ“₯ Load environment variables
8
  load_dotenv()
9
 
10
  client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
11
 
12
- # πŸ” Transcribe audio input using OpenAI Whisper API
13
  def transcribe_audio(audio_file: str) -> str:
14
  with open(audio_file, "rb") as f:
15
  transcript = client.audio.transcriptions.create(
16
  model="whisper-1",
17
  file=f
18
  )
19
- return transcript.text
20
 
21
- # πŸ’¬ Call OpenAI chat with the transcribed or text input
22
  def call_openai(event_details: str) -> str:
23
  prompt = SYSTEM_PROMPT.format(event_details)
24
  response = client.chat.completions.create(
@@ -27,33 +26,26 @@ def call_openai(event_details: str) -> str:
27
  temperature=0.5,
28
  max_tokens=1024,
29
  )
30
- return response.choices[0].message.content
 
 
 
31
 
32
- # πŸ”„ New function to handle audio input and route to existing pipeline
33
  def case_study_from_audio(audio: str) -> str:
34
  text = transcribe_audio(audio)
35
  return case_study_generator(text)
36
 
37
- # πŸŽ›οΈ Original function β€” unchanged but now typed
38
- def case_study_generator(event_details: str) -> str:
39
- return call_openai(event_details)
40
-
41
- # πŸš€ Gradio interface with two modes: Text or Audio
42
  with gr.Blocks() as demo:
43
  gr.Markdown("## πŸ“„ iBoothMe Case Study Generator\nWrite a description or record audio.")
44
-
45
  with gr.Tab("Text Input"):
46
- text_input = gr.Textbox(
47
- label="Event Description",
48
- lines=10,
49
- placeholder="Paste informal event description here...",
50
- )
51
  text_button = gr.Button("Generate from Text")
52
  text_output = gr.Textbox(label="Generated Case Study", lines=15)
53
  text_button.click(fn=case_study_generator, inputs=text_input, outputs=text_output)
54
 
55
  with gr.Tab("Audio Input"):
56
- audio_input = gr.Audio(type="filepath", label="Upload or Record your event description")
57
  audio_button = gr.Button("Generate from Audio")
58
  audio_output = gr.Textbox(label="Generated Case Study", lines=15)
59
  audio_button.click(fn=case_study_from_audio, inputs=audio_input, outputs=audio_output)
 
4
  import openai
5
  from prompts import SYSTEM_PROMPT
6
 
7
+ # Load environment variables
8
  load_dotenv()
9
 
10
  client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
11
 
12
+ # Ensure all outputs are returned as string
13
  def transcribe_audio(audio_file: str) -> str:
14
  with open(audio_file, "rb") as f:
15
  transcript = client.audio.transcriptions.create(
16
  model="whisper-1",
17
  file=f
18
  )
19
+ return str(transcript.text)
20
 
 
21
  def call_openai(event_details: str) -> str:
22
  prompt = SYSTEM_PROMPT.format(event_details)
23
  response = client.chat.completions.create(
 
26
  temperature=0.5,
27
  max_tokens=1024,
28
  )
29
+ return str(response.choices[0].message.content)
30
+
31
+ def case_study_generator(event_details: str) -> str:
32
+ return call_openai(event_details)
33
 
 
34
  def case_study_from_audio(audio: str) -> str:
35
  text = transcribe_audio(audio)
36
  return case_study_generator(text)
37
 
 
 
 
 
 
38
  with gr.Blocks() as demo:
39
  gr.Markdown("## πŸ“„ iBoothMe Case Study Generator\nWrite a description or record audio.")
40
+
41
  with gr.Tab("Text Input"):
42
+ text_input = gr.Textbox(label="Event Description", lines=10)
 
 
 
 
43
  text_button = gr.Button("Generate from Text")
44
  text_output = gr.Textbox(label="Generated Case Study", lines=15)
45
  text_button.click(fn=case_study_generator, inputs=text_input, outputs=text_output)
46
 
47
  with gr.Tab("Audio Input"):
48
+ audio_input = gr.Audio(type="filepath", label="Upload or Record audio")
49
  audio_button = gr.Button("Generate from Audio")
50
  audio_output = gr.Textbox(label="Generated Case Study", lines=15)
51
  audio_button.click(fn=case_study_from_audio, inputs=audio_input, outputs=audio_output)