# -*- coding: utf-8 -*-
import gradio as gr
import whisper
import os
import subprocess
import pandas as pd
import tempfile
import traceback
from docx import Document
import time
import numpy as np
import soundfile as sf
from scipy import signal
from datetime import datetime

# Model speed settings (adjusted values)
model_speed = {
    "tiny.en": 1.0,          # previously 2.0
    "tiny": 0.8,             # previously 2.0
    "base.en": 0.7,          # previously 1.5
    "base": 0.6,             # previously 1.5
    "small.en": 0.5,         # previously 1.0
    "small": 0.4,            # previously 1.0
    "medium.en": 0.3,        # previously 0.75
    "medium": 0.25,          # previously 0.75
    "large-v1": 0.2,         # previously 0.5
    "large-v2": 0.2,         # previously 0.5
    "large-v3": 0.2,         # previously 0.5
    "large": 0.2,            # previously 0.5
    "large-v3-turbo": 0.25,  # previously 0.6
    "turbo": 0.25            # previously 0.6
}

# Earlier relative-speed values, kept for reference:
# "tiny.en": 32.0,          # fastest model
# "tiny": 32.0,
# "base.en": 16.0,          # about half the speed of tiny
# "base": 16.0,
# "small.en": 6.0,          # roughly 2.7x slower than base
# "small": 6.0,
# "medium.en": 2.0,         # 3x slower than small
# "medium": 2.0,
# "large-v1": 1.0,          # slowest baseline models
# "large-v2": 1.0,
# "large-v3": 1.0,
# "large": 1.0,
# "large-v3-turbo": 1.5,    # turbo variants are slightly faster
# "turbo": 1.5

# Model feature descriptions
model_features = {
    "tiny.en": "最小且最快的英文專用模型,適合簡單的英文語音。",
    "tiny": "最小且最快的多語言模型,適合簡單的多語言語音。",
    "base.en": "基礎英文專用模型,平衡速度和準確度。",
    "base": "基礎多語言模型,平衡速度和準確度。",
    "small.en": "較準確的英文專用模型,適合一般英文轉錄。",
    "small": "較準確的多語言模型,適合一般多語言轉錄。",
    "medium.en": "高準確度的英文專用模型,適合複雜英文內容。",
    "medium": "高準確度的多語言模型,適合複雜多語言內容。",
    "large-v1": "最早版本的大型模型,提供最佳準確度。",
    "large-v2": "改進版大型模型,提供更好的多語言支援。",
    "large-v3": "最新版大型模型,整體性能更優。",
    "large": "大型模型的最新版本別名。",
    "large-v3-turbo": "針對速度優化的 large-v3 模型。",
    "turbo": "針對速度優化的最新模型別名。"
}

# Available model choices
model_choices = list(model_speed.keys())

# Supported audio and video formats
supported_audio_formats = [".mp3", ".wav", ".flac", ".aac", ".ogg", ".m4a"]
supported_video_formats = [".mp4", ".mov", ".avi", ".mkv", ".webm"]

# Supported export formats
export_formats = ['.txt', '.md', '.srt', '.docx']

# Global model cache
loaded_models = {}


def save_audio(audio_data, sr):
    """Write the incoming audio to a temporary 16 kHz WAV file and return its path."""
    try:
        if audio_data is None:
            print("無效的音頻數據")
            return None

        print(f"原始音頻數據類型: {type(audio_data)}")

        # Create a temporary WAV file
        with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
            # Fixed target sample rate
            target_sr = 16000

            # With Gradio's numpy audio input the second argument actually carries
            # the sample array, so it is treated as the waveform and resampled here.
            if isinstance(sr, np.ndarray):
                print(f"原始採樣率: {sr[:10]}...")  # shows the first 10 sample values
                audio_data = sr.astype(np.float32)
                # Assume the original sample rate is 44100 Hz
                original_sr = 44100
                # Length after resampling
                new_length = int(len(audio_data) * target_sr / original_sr)
                # Resample
                audio_data = signal.resample(audio_data, new_length)
            else:
                print("無法獲取有效的音頻數據")
                return None

            # Make sure the array is two-dimensional
            if audio_data.ndim == 1:
                audio_data = audio_data.reshape(-1, 1)

            # Normalise the audio data
            max_val = np.abs(audio_data).max()
            if max_val > 0:
                audio_data = audio_data / max_val

            print(f"處理後的音頻數據形狀: {audio_data.shape}, 採樣率: {target_sr}, 數據類型: {audio_data.dtype}")
            print(f"音頻數據範圍: [{audio_data.min():.3f}, {audio_data.max():.3f}]")

            # Write the audio file
            sf.write(temp_audio.name, audio_data, target_sr)

            # Verify the file
            if os.path.exists(temp_audio.name) and os.path.getsize(temp_audio.name) > 0:
                print(f"成功創建音頻文件: {temp_audio.name}")
                return temp_audio.name
            else:
                print("音頻文件創建失敗")
                return None
    except Exception as e:
        print(f"保存音頻失敗: {str(e)}")
        traceback.print_exc()
        return None
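
# --- Hedged sketch, not part of the original flow ---------------------------
# save_audio() above hard-codes the source rate as 44100 Hz before resampling
# to 16 kHz.  If the real source rate is available (e.g. from the
# (sample_rate, data) tuple that a Gradio gr.Audio component with type="numpy"
# produces), a small helper like the one below could resample with the actual
# rate instead.  The helper name and its use are illustrative assumptions only.
def resample_to_16k(data, source_sr, target_sr=16000):
    """Resample a waveform from source_sr to target_sr using polyphase filtering."""
    data = np.asarray(data, dtype=np.float32)
    if source_sr == target_sr:
        return data
    # resample_poly(x, up, down) resamples by the rational factor up/down and
    # tends to avoid the edge artifacts that signal.resample can introduce.
    return signal.resample_poly(data, target_sr, source_sr)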

# Show the estimated transcription time for every model
def estimate_all_models_transcription_time(file_path):
    try:
        file_extension = os.path.splitext(file_path)[1].lower()

        # If the input is a video file, extract the audio first
        if file_extension in supported_video_formats:
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
                audio_path = extract_audio_from_video(file_path, temp_audio.name)
            audio_duration = get_media_duration(audio_path)
            os.remove(audio_path)  # remove the temporary audio file
        else:
            audio_duration = get_media_duration(file_path)

        # Build the table of estimated times
        estimates = []
        for model_name in model_choices:
            estimated_time = audio_duration / model_speed[model_name]
            estimates.append({
                "模型名稱": model_name,
                "模型特色": model_features[model_name],
                "預估轉錄時間 (秒)": f"{estimated_time:.2f}"
            })

        df = pd.DataFrame(estimates)
        return df
    except Exception as e:
        print(f"估算轉錄時間失敗: {str(e)}\n{traceback.format_exc()}")
        return f"估算轉錄時間失敗: {str(e)}"


def display_model_estimations(file):
    if file is not None:
        return estimate_all_models_transcription_time(file.name)
    else:
        return "請上傳音訊或影片文件"


# Load a remote model (kept for reference)
# def load_model(model_name):
#     if model_name not in loaded_models:
#         print(f"正在加載模型:{model_name}")
#         try:
#             loaded_models[model_name] = whisper.load_model(model_name)
#         except Exception as e:
#             print(f"模型加載失敗: {str(e)}\n{traceback.format_exc()}")
#             raise RuntimeError(f"模型加載失敗: {str(e)}")
#     return loaded_models[model_name]

# Load a local model
def load_model(model_name):
    if model_name not in loaded_models:
        print(f"正在加載本地模型:{model_name}")
        try:
            model_path = f"./models/{model_name}.pt"  # local model path
            loaded_models[model_name] = whisper.load_model(model_path)  # load the local model
        except Exception as e:
            print(f"模型加載失敗: {str(e)}\n{traceback.format_exc()}")
            raise RuntimeError(f"模型加載失敗: {str(e)}")
    return loaded_models[model_name]


# Get the duration of a media file
def get_media_duration(file_path):
    try:
        command = [
            "ffprobe", "-v", "error",
            "-show_entries", "format=duration",
            "-of", "default=noprint_wrappers=1:nokey=1",
            file_path
        ]
        result = subprocess.run(command, stdout=subprocess.PIPE, stderr=subprocess.PIPE, text=True, check=True)
        duration = float(result.stdout.strip())
        return duration
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"獲取媒體時長失敗: {e.stderr.strip()}")
    except Exception as e:
        raise RuntimeError(f"其他錯誤: {str(e)}\n{traceback.format_exc()}")


# Extract the audio track from a video
def extract_audio_from_video(video_path, output_audio_path="extracted_audio.wav"):
    try:
        # "-y" overwrites the pre-created temporary output file instead of
        # prompting, which would otherwise stall the subprocess.
        command = [
            "ffmpeg", "-y", "-i", video_path,
            "-vn", "-acodec", "pcm_s16le", "-ar", "44100", "-ac", "2",
            output_audio_path
        ]
        subprocess.run(command, check=True, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
        return output_audio_path
    except subprocess.CalledProcessError as e:
        raise RuntimeError(f"音訊提取失敗: {e.stderr.decode()}")


def save_transcription_to_file(transcription, export_format, include_timestamps, result, file_name):
    if include_timestamps and "segments" in result:
        # Export with timestamps
        if export_format == ".srt":
            srt_content = ""
            for i, segment in enumerate(result["segments"], start=1):
                start = segment['start']
                end = segment['end']
                text = segment['text']
                srt_content += f"{i}\n{format_srt_time(start)} --> {format_srt_time(end)}\n{text}\n\n"
            transcription = srt_content
        elif export_format == ".docx":
            doc = Document()
            for segment in result["segments"]:
                start = segment['start']
                end = segment['end']
                text = segment['text']
                doc.add_paragraph(f"[{format_srt_time(start)} - {format_srt_time(end)}] {text}")
            doc.save(file_name)
            return file_name
    else:
        # Export without timestamps
        transcription = "\n".join(segment["text"] for segment in result.get("segments", [{"text": transcription}]))

    # Write the plain-text (or timestamped) content in the chosen format
    if export_format in (".txt", ".md", ".srt"):
        with open(file_name, "w", encoding='utf-8') as f:
            f.write(transcription)
    elif export_format == ".docx":
        doc = Document()
        doc.add_paragraph(transcription)
        doc.save(file_name)
    return file_name


# Format a time value as mm:ss (used for on-screen display and SRT export)
def format_srt_time(seconds):
    minutes = int(seconds // 60)
    seconds = int(seconds % 60)
    return f"{minutes:02}:{seconds:02}"
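
# --- Hedged sketch, not part of the original flow ---------------------------
# format_srt_time() above emits "mm:ss", which reads fine on screen but is not
# the "HH:MM:SS,mmm" timestamp form the SRT specification expects, so the
# generated .srt files may not load in strict players.  If fully valid SRT
# output is wanted, a formatter such as the following (an assumed helper, not
# used by the original code) could be swapped in where .srt content is built.
def format_srt_timestamp(seconds):
    """Format a time in seconds as an SRT-style HH:MM:SS,mmm timestamp."""
    milliseconds = int(round(seconds * 1000))
    hours, remainder = divmod(milliseconds, 3_600_000)
    minutes, remainder = divmod(remainder, 60_000)
    secs, millis = divmod(remainder, 1000)
    return f"{hours:02}:{minutes:02}:{secs:02},{millis:03}"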

# Global variable that stores the transcription result
transcription_result = {}


def transcribe_and_export_with_progress(model_name, file, prompt):
    global transcription_result

    if file is None:
        yield "請上傳音訊或影片檔案", None
        return

    try:
        file_extension = os.path.splitext(file.name)[1].lower()

        # Extract the audio if the input is a video
        if file_extension in supported_video_formats:
            yield "開始提取音訊...", None
            with tempfile.NamedTemporaryFile(suffix=".wav", delete=False) as temp_audio:
                audio_path = extract_audio_from_video(file.name, temp_audio.name)
        else:
            audio_path = file.name

        # Total audio duration
        total_duration = get_media_duration(audio_path)
        model_speed_factor = model_speed.get(model_name, 1.0)  # model speed factor
        estimated_total_time = total_duration / model_speed_factor  # estimated transcription time

        yield f"準備開始轉錄\n音訊總長度:{total_duration:.2f} 秒,預計完成時間:{estimated_total_time:.2f} 秒", None

        # Start transcription and report progress
        print("開始轉錄音訊...")
        model = load_model(model_name)

        # Start timing
        start_time = time.time()

        def update_progress():
            current_time = time.time() - start_time
            if current_time <= estimated_total_time:
                remaining_time = max(0, estimated_total_time - current_time)
                return (f"轉錄進行中...\n"
                        f"已經過時間:{current_time:.1f} 秒\n"
                        f"預估剩餘時間:{remaining_time:.1f} 秒")
            else:
                return (f"轉錄進行中...\n"
                        f"已經過時間:{current_time:.1f} 秒\n"
                        f"尚在處理中,請耐心等候~")

        # Emit a progress update once per second.  Note that this only simulates
        # progress for the estimated duration; the actual transcription call
        # below starts after this loop finishes.
        while time.time() - start_time < estimated_total_time:
            yield update_progress(), None
            time.sleep(1)

        result = model.transcribe(audio_path, initial_prompt=prompt, word_timestamps=True)

        # Build the transcription text
        transcription = ""
        for segment in result["segments"]:
            start = segment["start"]
            end = segment["end"]
            text = segment["text"]
            transcription += f"[{format_srt_time(start)} - {format_srt_time(end)}] {text}\n"

        # Total elapsed time
        total_time = time.time() - start_time

        # Store the result in the global variable
        transcription_result = result

        # Return the full transcription once finished
        yield f"轉錄完成!\n總耗時:{total_time:.1f} 秒", transcription
    except Exception as e:
        error_message = f"處理過程中出現錯誤: {str(e)}\n{traceback.format_exc()}"
        print(error_message)
        yield error_message, None


# Set up the Gradio interface
with gr.Blocks(theme=gr.themes.Default(primary_hue=gr.themes.colors.yellow, secondary_hue=gr.themes.colors.red)) as interface:
    # Title and description
    gr.HTML("""