youngtsai committed on
Commit
03cc228
·
1 Parent(s): 08c4adf

client.chat.completions.create

Browse files
Files changed (2) hide show
  1. app.py +9 -10
  2. requirements.txt +1 -1
app.py CHANGED
@@ -3,7 +3,7 @@ import random
3
  import string
4
  import gradio as gr
5
  import yt_dlp as ydlp
6
- import openai
7
  import re
8
 
9
  def ms_to_srt_time(ms):
@@ -123,7 +123,7 @@ def process_video(yt_id_or_url, openAI_key=None, password_secret=None):
123
  print(nonsilent_ranges)
124
 
125
  # Initialize OpenAI API client
126
- openai.api_key = openAI_key
127
 
128
  srt_content = ""
129
  counter = 1
@@ -133,16 +133,15 @@ def process_video(yt_id_or_url, openAI_key=None, password_secret=None):
133
  chunk.export("temp_chunk.wav", format="wav")
134
 
135
  with open("temp_chunk.wav", "rb") as audio_file:
136
- transcript = openai.Audio.transcribe(
137
  model="whisper-1",
138
- file=audio_file
 
139
  )
140
-
141
- transcription = transcript["text"]
142
 
143
  srt_content += f"{counter}\n"
144
  srt_content += f"{ms_to_srt_time(start)} --> {ms_to_srt_time(end)}\n"
145
- srt_content += f"{transcription}\n\n"
146
  counter += 1
147
 
148
  # 列印SRT
@@ -182,7 +181,7 @@ def process_video(yt_id_or_url, openAI_key=None, password_secret=None):
182
  ]
183
  }
184
 
185
- large_scope_srt_response = openai.ChatCompletion.create(**large_scope_srt_request_payload)
186
  large_scope_srt = large_scope_srt_response.choices[0].message.content.strip()
187
 
188
 
@@ -200,7 +199,7 @@ def process_video(yt_id_or_url, openAI_key=None, password_secret=None):
200
  }
201
  ]
202
  }
203
- summary_response = openai.ChatCompletion.create(**summary_request_payload)
204
  summary = summary_response.choices[0].message.content.strip()
205
 
206
  print("=========# 生成摘要==========")
@@ -222,7 +221,7 @@ def process_video(yt_id_or_url, openAI_key=None, password_secret=None):
222
  ]
223
  }
224
 
225
- mind_map_response = openai.ChatCompletion.create(**mind_map_request_payload)
226
  mind_map = mind_map_response.choices[0].message.content.strip()
227
  print("=========# 生成思維導圖==========")
228
  print(mind_map)
 
3
  import string
4
  import gradio as gr
5
  import yt_dlp as ydlp
6
+ from openai import OpenAI
7
  import re
8
 
9
  def ms_to_srt_time(ms):
 
123
  print(nonsilent_ranges)
124
 
125
  # Initialize OpenAI API client
126
+ client = OpenAI(api_key = openAI_key)
127
 
128
  srt_content = ""
129
  counter = 1
 
133
  chunk.export("temp_chunk.wav", format="wav")
134
 
135
  with open("temp_chunk.wav", "rb") as audio_file:
136
+ transcript = client.audio.transcriptions.create(
137
  model="whisper-1",
138
+ file=audio_file,
139
+ response_format="text"
140
  )
 
 
141
 
142
  srt_content += f"{counter}\n"
143
  srt_content += f"{ms_to_srt_time(start)} --> {ms_to_srt_time(end)}\n"
144
+ srt_content += f"{transcript}\n\n"
145
  counter += 1
146
 
147
  # 列印SRT
 
181
  ]
182
  }
183
 
184
+ large_scope_srt_response = client.chat.completions.create(**large_scope_srt_request_payload)
185
  large_scope_srt = large_scope_srt_response.choices[0].message.content.strip()
186
 
187
 
 
199
  }
200
  ]
201
  }
202
+ summary_response = client.chat.completions.create(**summary_request_payload)
203
  summary = summary_response.choices[0].message.content.strip()
204
 
205
  print("=========# 生成摘要==========")
 
221
  ]
222
  }
223
 
224
+ mind_map_response = client.chat.completions.create(**mind_map_request_payload)
225
  mind_map = mind_map_response.choices[0].message.content.strip()
226
  print("=========# 生成思維導圖==========")
227
  print(mind_map)
requirements.txt CHANGED
@@ -1,4 +1,4 @@
1
  yt-dlp
2
  gradio
3
  pydub
4
- openai
 
1
  yt-dlp
2
  gradio
3
  pydub
4
+ openai >= 1.0.0