Update app.py
Browse files
app.py
CHANGED
|
@@ -1,9 +1,10 @@
|
|
| 1 |
import os
|
|
|
|
| 2 |
from fastapi import FastAPI, UploadFile, Form, HTTPException
|
| 3 |
from fastapi.responses import JSONResponse
|
| 4 |
from openai import OpenAI
|
| 5 |
-
import gradio as gr
|
| 6 |
|
|
|
|
| 7 |
print("===== 🚀 啟動中 =====")
|
| 8 |
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
|
| 9 |
APP_PASSWORD = os.getenv("APP_PASSWORD")
|
|
@@ -13,18 +14,19 @@ print(f"APP_PASSWORD: {APP_PASSWORD}")
|
|
| 13 |
|
| 14 |
client = OpenAI(api_key=OPENAI_API_KEY)
|
| 15 |
|
| 16 |
-
# === FastAPI 主
|
| 17 |
-
|
| 18 |
|
| 19 |
-
@
|
| 20 |
async def ping():
|
| 21 |
-
return {"status": "ok", "APP_PASSWORD": APP_PASSWORD
|
| 22 |
|
| 23 |
-
@
|
| 24 |
async def transcribe_api(file: UploadFile, token: str = Form(...)):
|
| 25 |
-
print(f"📥 收到
|
|
|
|
| 26 |
if not APP_PASSWORD:
|
| 27 |
-
raise HTTPException(status_code=500, detail="APP_PASSWORD not set")
|
| 28 |
if token != APP_PASSWORD:
|
| 29 |
raise HTTPException(status_code=403, detail="Forbidden: invalid token")
|
| 30 |
|
|
@@ -32,13 +34,13 @@ async def transcribe_api(file: UploadFile, token: str = Form(...)):
|
|
| 32 |
with open(temp_path, "wb") as f:
|
| 33 |
f.write(await file.read())
|
| 34 |
|
| 35 |
-
#
|
| 36 |
with open(temp_path, "rb") as audio_file:
|
| 37 |
transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
|
| 38 |
text = transcript.text.strip()
|
| 39 |
|
| 40 |
-
#
|
| 41 |
-
summary_prompt = f"請
|
| 42 |
summary = client.chat.completions.create(
|
| 43 |
model="gpt-4o-mini",
|
| 44 |
messages=[{"role": "user", "content": summary_prompt}]
|
|
@@ -46,15 +48,14 @@ async def transcribe_api(file: UploadFile, token: str = Form(...)):
|
|
| 46 |
|
| 47 |
return JSONResponse({"text": text, "summary": summary})
|
| 48 |
|
| 49 |
-
# === Gradio
|
| 50 |
def gradio_ui(audio):
|
| 51 |
if audio is None:
|
| 52 |
return "請上傳音訊檔案", ""
|
| 53 |
with open(audio, "rb") as f:
|
| 54 |
transcript = client.audio.transcriptions.create(model="whisper-1", file=f)
|
| 55 |
text = transcript.text.strip()
|
| 56 |
-
|
| 57 |
-
summary_prompt = f"請幫我用中文摘要以下內容:\n\n{text}"
|
| 58 |
summary = client.chat.completions.create(
|
| 59 |
model="gpt-4o-mini",
|
| 60 |
messages=[{"role": "user", "content": summary_prompt}]
|
|
@@ -66,10 +67,14 @@ demo = gr.Interface(
|
|
| 66 |
inputs=gr.Audio(type="filepath", label="上傳音訊"),
|
| 67 |
outputs=[gr.Textbox(label="轉錄文字"), gr.Textbox(label="AI 摘要")],
|
| 68 |
title="LINE 語音轉錄與摘要 (API + UI)",
|
| 69 |
-
description="可透過
|
| 70 |
)
|
| 71 |
|
| 72 |
-
# ✅ 關鍵:
|
| 73 |
-
|
| 74 |
-
|
| 75 |
-
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
import os

import gradio as gr
from fastapi import FastAPI, UploadFile, Form, HTTPException
from fastapi.responses import JSONResponse
from openai import OpenAI

# === Initialization ===
# Startup banner plus configuration pulled from the environment; both values
# may be None when the corresponding variable is unset.
print("===== 🚀 啟動中 =====")
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")
APP_PASSWORD = os.getenv("APP_PASSWORD")

# Shared OpenAI client used by both the API endpoint and the Gradio UI.
client = OpenAI(api_key=OPENAI_API_KEY)
| 16 |
|
| 17 |
# === FastAPI backend ===
app = FastAPI(title="LINE Audio Transcriber")


@app.get("/ping")
async def ping():
    """Liveness probe.

    Returns a status marker and whether APP_PASSWORD is configured.
    Only a boolean is reported so the password value itself is never exposed.
    """
    password_configured = bool(APP_PASSWORD)
    return {"status": "ok", "APP_PASSWORD": password_configured}
| 23 |
|
| 24 |
+
@app.post("/api/transcribe")
|
| 25 |
async def transcribe_api(file: UploadFile, token: str = Form(...)):
|
| 26 |
+
print(f"📥 收到請求: {file.filename}, token={token}")
|
| 27 |
+
|
| 28 |
if not APP_PASSWORD:
|
| 29 |
+
raise HTTPException(status_code=500, detail="APP_PASSWORD not set.")
|
| 30 |
if token != APP_PASSWORD:
|
| 31 |
raise HTTPException(status_code=403, detail="Forbidden: invalid token")
|
| 32 |
|
|
|
|
| 34 |
with open(temp_path, "wb") as f:
|
| 35 |
f.write(await file.read())
|
| 36 |
|
| 37 |
+
# whisper 轉錄
|
| 38 |
with open(temp_path, "rb") as audio_file:
|
| 39 |
transcript = client.audio.transcriptions.create(model="whisper-1", file=audio_file)
|
| 40 |
text = transcript.text.strip()
|
| 41 |
|
| 42 |
+
# AI 摘要
|
| 43 |
+
summary_prompt = f"請用繁體中文摘要以下內容:\n\n{text}"
|
| 44 |
summary = client.chat.completions.create(
|
| 45 |
model="gpt-4o-mini",
|
| 46 |
messages=[{"role": "user", "content": summary_prompt}]
|
|
|
|
| 48 |
|
| 49 |
return JSONResponse({"text": text, "summary": summary})
|
| 50 |
|
| 51 |
+
# === Gradio 介面 ===
|
| 52 |
def gradio_ui(audio):
|
| 53 |
if audio is None:
|
| 54 |
return "請上傳音訊檔案", ""
|
| 55 |
with open(audio, "rb") as f:
|
| 56 |
transcript = client.audio.transcriptions.create(model="whisper-1", file=f)
|
| 57 |
text = transcript.text.strip()
|
| 58 |
+
summary_prompt = f"請用繁體中文摘要以下內容:\n\n{text}"
|
|
|
|
| 59 |
summary = client.chat.completions.create(
|
| 60 |
model="gpt-4o-mini",
|
| 61 |
messages=[{"role": "user", "content": summary_prompt}]
|
|
|
|
| 67 |
inputs=gr.Audio(type="filepath", label="上傳音訊"),
|
| 68 |
outputs=[gr.Textbox(label="轉錄文字"), gr.Textbox(label="AI 摘要")],
|
| 69 |
title="LINE 語音轉錄與摘要 (API + UI)",
|
| 70 |
+
description="可透過捷徑上傳音訊並取得文字摘要"
|
| 71 |
)
|
| 72 |
|
| 73 |
# ✅ Key fix: Hugging Face Spaces expects the exported ASGI object to be a
# FastAPI app, not a mixed Gradio object — so we mount the Gradio UI onto the
# existing FastAPI instance at the root path using gradio's mount_gradio_app.
# (The previous comment claimed the mount was done "manually, not via
# mount_gradio_app", which contradicted the code below; mount_gradio_app IS used.)
from gradio.routes import mount_gradio_app

gr_app = mount_gradio_app(app, demo, path="/")

# NOTE(review): presumably the Space runtime looks up a module-level variable
# named `application` — confirm against the Space's server configuration.
application = gr_app