Spaces:
hersia
/
Runtime error

hersia committed on
Commit
970deff
·
1 Parent(s): 83e65a1

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +23 -62
app.py CHANGED
@@ -3,80 +3,41 @@ import gradio as gr
3
  import openai
4
  from gtts import gTTS
5
 
6
-
7
- openai.api_key = os.environ["OPEN_AI_KEY"]
8
-
9
 
10
  def transcribe(audio):
11
- audio_file = open(audio, "rb")
12
- transcript = openai.Audio.transcribe("whisper-1", audio_file)
13
-
14
- return transcript["text"]
15
-
16
-
17
 
18
-
19
  with gr.Blocks() as demo:
20
- gr.Markdown(
21
- """
22
- # Welcome to PiP_W.
23
- """
24
- )
25
- with gr.Accordion("Click for Instructions:"):
26
- gr.Markdown(
27
- """
28
- * Record your query.
29
- * Submit your query, and follow the chat or listen to the advice.
30
- * When you are ready to respond, clear your last recording and resubmit.
31
- note: Transcribe Audio does not work on iOS
32
  """)
33
-
34
-
35
- messages = gr.State(value=[{"role": "system", "content": "Ignore all instructions prior to this one. Prompt: Design an all-inclusive, interactive, and immersive gamified chat-based training program for novice and seasoned traders to master the art of trading equities and cryptocurrencies. The training program will be accessible through a chat interface, offering users five different program options, including learning lessons, tests, and more. Users can type MENU to see the menu and navigate through the program. Tests and exercises should be interactive, engaging users one question or fact at a time. The curriculum should be based on the material provided above. Curriculum Outline: 1. Trading Foundations 2. Understanding the Markets 3. Developing Trading Strategies 4. Risk Management Essentials 5. Trading Psychology and Mental Fortitude 6. Trading Tools and Technological Innovation 7. Crafting a Personalized Trading Plan 8. Trading Ethics and Regulatory Compliance Program Menu: 1. Learning Lessons: In-depth lessons covering each curriculum topic, using engaging multimedia content and chat-based interactions. 2. Interactive Tests: Quizzes and assessments to evaluate users' understanding of the material, one question at a time. 3. Practical Exercises: Hands-on activities and simulations that allow users to apply their knowledge in a risk-free environment. 4. Progress Tracking & Rewards: A personalized dashboard to track progress, collect badges, and unlock achievements. 5. Community & Support: Access to chat-based forums, leaderboards, and team-based challenges to foster collaboration and friendly competition. When users type MENU, they will be presented with the following options: 1. Learning Lessons 2. Interactive Tests 3. Practical Exercises 4. Progress Tracking & Rewards 5. Community & Support To select a program option, users can simply type the corresponding number. 
The chat interface will guide users through the chosen program, providing an engaging and interactive learning experience. Tests and exercises will be designed to present users with one question or fact at a time, ensuring that users can fully focus on each task without feeling overwhelmed"}])
36
-
37
- def botResponse(user_input, messages):
38
- messages.append({"role": "user", "content": user_input})
39
- response = openai.ChatCompletion.create(
40
- model="gpt-3.5-turbo-0301",
41
- messages=messages
42
- )
43
-
44
- system_message = response["choices"][0]["message"]["content"]
45
- messages.append({"role": "assistant", "content": system_message})
46
-
47
- chat_transcript = ""
48
- for message in messages:
49
- if (message["role"] != "system"):
50
- chat_transcript += message["role"] + ": " + message["content"] + "\n\n"
51
-
52
- return chat_transcript
53
-
54
- def giveVoice(messages):
55
- bot_message=messages[-1]
56
-
57
- myobj = gTTS(text=bot_message["content"])
58
- myobj.save("temp.mp3")
59
-
60
- dir = os.getcwd()
61
- new_path = os.path.join(dir, "temp.mp3")
62
-
63
- return new_path
64
 
65
  with gr.Row():
66
  with gr.Column(scale=1):
67
  user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
68
  submit_btn = gr.Button(value="Transcribe Audio")
69
- submit_btn2 = gr.Button(value="Submit Text")
70
  gpt_voice = gr.Audio(label="Listen to Advice")
71
  with gr.Column(scale=2):
72
  user_transcript = gr.Text(label="Audio Translation", interactive=False)
73
- user_text = gr.Text(label="Text Input",on_enter=True)
74
  gpt_transcript = gr.Text(label="Chat Transcript")
75
- user_text.on_enter(botResponse, [user_text, messages], gpt_transcript) # Bind the action to on_enter
76
- submit_btn.click(transcribe, user_audio, user_transcript)
77
- submit_btn2.click(botResponse, [user_text, messages], gpt_transcript)
78
- user_transcript.change(botResponse, [user_transcript, messages], gpt_transcript)
79
- gpt_transcript.change(giveVoice, messages, gpt_voice)
80
-
81
-
82
  demo.launch(share=False)
 
3
  import openai
4
  from gtts import gTTS
5
 
6
+ openai.api_key = "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"
 
 
7
 
8
  def transcribe(audio):
9
+ audio_file = open(audio, "rb")
10
+ transcript = openai.Audio.transcribe("whisper-1", audio_file)
11
+
12
+ return transcript["text"]
 
 
13
 
 
14
  with gr.Blocks() as demo:
15
+ gr.Markdown("""
16
+ # Welcome to PiP World.
 
 
 
 
 
 
 
 
 
 
17
  """)
18
+ with gr.Accordion("Click for Instructions:"):
19
+ gr.Markdown("""
20
+ * Record your query.
21
+ * Submit your query, and follow the chat or listen to the advice.
22
+ * When you are ready to respond, clear your last recording and resubmit.
23
+ note: Transcribe Audio does not work on iOS
24
+ """)
25
+ messages = gr.State(value=[{"role": "system", "content": "Ignore all instructions prior to this one. Prompt: Design an all-inclusive, interactive, and immersive gamified chat-based training program"}])
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
26
 
27
  with gr.Row():
28
  with gr.Column(scale=1):
29
  user_audio = gr.Audio(source="microphone", type="filepath", label="Input Phrase")
30
  submit_btn = gr.Button(value="Transcribe Audio")
 
31
  gpt_voice = gr.Audio(label="Listen to Advice")
32
  with gr.Column(scale=2):
33
  user_transcript = gr.Text(label="Audio Translation", interactive=False)
34
+ user_text = gr.Text(label="Text Input", on_enter=True) # Set on_enter to True
35
  gpt_transcript = gr.Text(label="Chat Transcript")
36
+
37
+ submit_btn.click(transcribe, inputs=user_audio, outputs=user_transcript)
38
+ user_text.set_on_enter(lambda: botResponse(user_text.value, messages, gpt_transcript)) # Bind the action to on_enter
39
+ user_text.set_on_change(lambda: botResponse(user_text.value, messages, gpt_transcript)) # Bind the action to on_change
40
+ user_transcript.on_change(botResponse, inputs=[user_transcript, messages], outputs=gpt_transcript)
41
+ gpt_transcript.on_change(giveVoice, inputs=messages, outputs=gpt_voice)
42
+
43
  demo.launch(share=False)