Summeya committed on
Commit
b22d596
Β·
verified Β·
1 Parent(s): 60d3e76

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +61 -61
app.py CHANGED
@@ -1,61 +1,61 @@
1
- import os
2
- from dotenv import load_dotenv
3
- import gradio as gr
4
- import openai
5
- from prompts import SYSTEM_PROMPT
6
-
7
- # πŸ“₯ Load environment variables
8
- load_dotenv()
9
-
10
- client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
11
-
12
- # πŸ” Transcribe audio input using OpenAI Whisper API
13
- def transcribe_audio(audio_file):
14
- with open(audio_file, "rb") as f:
15
- transcript = client.audio.transcriptions.create(
16
- model="whisper-1",
17
- file=f
18
- )
19
- return transcript.text
20
-
21
- # πŸ’¬ Call OpenAI chat with the transcribed or text input
22
- def call_openai(event_details):
23
- prompt = SYSTEM_PROMPT.format(event_details)
24
- response = client.chat.completions.create(
25
- model="gpt-4o",
26
- messages=[{"role": "user", "content": prompt}],
27
- temperature=0.5,
28
- max_tokens=1024,
29
- )
30
- return response.choices[0].message.content
31
-
32
- # πŸ”„ New function to handle audio input and route to existing pipeline
33
- def case_study_from_audio(audio):
34
- text = transcribe_audio(audio)
35
- return case_study_generator(text)
36
-
37
- # πŸŽ›οΈ Original function β€” unchanged
38
- def case_study_generator(event_details):
39
- gpt_response = call_openai(event_details)
40
- return gpt_response
41
-
42
- # πŸš€ Gradio interface with two modes: Text or Audio
43
- with gr.Blocks() as demo:
44
- gr.Markdown("## πŸ“„ iBoothMe Case Study Generator\nWrite a description or record audio.")
45
- with gr.Tab("Text Input"):
46
- text_input = gr.Textbox(
47
- label="Event Description",
48
- lines=10,
49
- placeholder="Paste informal event description here...",
50
- )
51
- text_button = gr.Button("Generate from Text")
52
- text_output = gr.Textbox(label="Generated Case Study", lines=15)
53
- text_button.click(fn=case_study_generator, inputs=text_input, outputs=text_output)
54
-
55
- with gr.Tab("Audio Input"):
56
- audio_input = gr.Audio(type="filepath", label="Upload or Record your event description")
57
- audio_button = gr.Button("Generate from Audio")
58
- audio_output = gr.Textbox(label="Generated Case Study", lines=15)
59
- audio_button.click(fn=case_study_from_audio, inputs=audio_input, outputs=audio_output)
60
-
61
- demo.launch()
 
1
+ import os
2
+ from dotenv import load_dotenv
3
+ import gradio as gr
4
+ import openai
5
+ from prompts import SYSTEM_PROMPT
6
+
7
+ # πŸ“₯ Load environment variables
8
+ load_dotenv()
9
+
10
+ client = openai.OpenAI(api_key=os.getenv("OPENAI_API_KEY"))
11
+
12
# 🔊 Transcribe audio input using the OpenAI Whisper API
def transcribe_audio(audio_file: str) -> str:
    """Transcribe the audio file at *audio_file* with Whisper.

    Opens the file in binary mode, sends it to the ``whisper-1`` model via
    the module-level ``client``, and returns the transcript text.
    """
    with open(audio_file, "rb") as audio_handle:
        result = client.audio.transcriptions.create(
            model="whisper-1",
            file=audio_handle,
        )
        return result.text
20
+
21
# 💬 Call OpenAI chat with the transcribed or text input
def call_openai(event_details: str) -> str:
    """Generate a case study for *event_details* with GPT-4o.

    Interpolates the details into ``SYSTEM_PROMPT`` (positional ``format``),
    sends it as a single user message, and returns the reply text.
    """
    filled_prompt = SYSTEM_PROMPT.format(event_details)
    chat = client.chat.completions.create(
        model="gpt-4o",
        messages=[{"role": "user", "content": filled_prompt}],
        temperature=0.5,
        max_tokens=1024,
    )
    return chat.choices[0].message.content
31
+
32
+ # πŸ”„ New function to handle audio input and route to existing pipeline
33
+ def case_study_from_audio(audio: str) -> str:
34
+ text = transcribe_audio(audio)
35
+ return case_study_generator(text)
36
+
37
# 🎛️ Text entry point — unchanged behavior, typed signature
def case_study_generator(event_details: str) -> str:
    """Produce a generated case study for the given event description."""
    generated = call_openai(event_details)
    return generated
40
+
41
# 🚀 Gradio interface with two modes: Text or Audio
with gr.Blocks() as demo:
    gr.Markdown("## 📄 iBoothMe Case Study Generator\nWrite a description or record audio.")

    # Tab 1: paste a text description and generate from it.
    with gr.Tab("Text Input"):
        description_box = gr.Textbox(
            label="Event Description",
            lines=10,
            placeholder="Paste informal event description here...",
        )
        generate_text_btn = gr.Button("Generate from Text")
        text_result = gr.Textbox(label="Generated Case Study", lines=15)
        generate_text_btn.click(
            fn=case_study_generator,
            inputs=description_box,
            outputs=text_result,
        )

    # Tab 2: record/upload audio, which is transcribed then routed
    # through the same text pipeline.
    with gr.Tab("Audio Input"):
        recording = gr.Audio(type="filepath", label="Upload or Record your event description")
        generate_audio_btn = gr.Button("Generate from Audio")
        audio_result = gr.Textbox(label="Generated Case Study", lines=15)
        generate_audio_btn.click(
            fn=case_study_from_audio,
            inputs=recording,
            outputs=audio_result,
        )

demo.launch()