Ryanus committed on
Commit
abf663a
·
verified ·
1 Parent(s): 1bce3e3

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +14 -39
app.py CHANGED
@@ -1,44 +1,19 @@
1
  import gradio as gr
2
- import os
3
- from scenedetect import open_video, SceneManager
4
- from scenedetect.detectors import ContentDetector
5
 
6
- OUTPUT_DIR = "outputs"
7
- os.makedirs(OUTPUT_DIR, exist_ok=True)
 
8
 
9
def detect_scenes(video_path):
    """Detect scene cuts in a video and export them to a CSV file.

    Returns a ``(status message, csv path or None)`` pair for the Gradio UI.
    """
    if not video_path or not os.path.exists(video_path):
        return "未找到影片檔案,請確認上傳檔案無誤", None

    try:
        video_stream = open_video(video_path)
        scene_manager = SceneManager()
        scene_manager.add_detector(ContentDetector())
        scene_manager.detect_scenes(video_stream)
        scenes = scene_manager.get_scene_list()

        # Assemble CSV content: header row first, then one row per scene.
        lines = ["Scene Number,Start Time,End Time"]
        lines.extend(
            f"{idx},{start.get_timecode()},{end.get_timecode()}"
            for idx, (start, end) in enumerate(scenes, start=1)
        )

        base_name, _ = os.path.splitext(os.path.basename(video_path))
        csv_path = os.path.join(OUTPUT_DIR, base_name + "_scenes.csv")
        with open(csv_path, "w", encoding="utf-8") as out:
            out.write("\n".join(lines) + "\n")

        return f"偵測到 {len(scenes)} 組分鏡,CSV 可下載", csv_path
    except Exception as e:
        # Surface the failure in the UI instead of crashing the app.
        return f"解析失敗:{e}", None
34
-
35
# Gradio UI: video upload in; status text plus downloadable CSV out.
iface = gr.Interface(
    detect_scenes,
    gr.Video(label="上傳影片"),
    [gr.Textbox(label="分鏡結果"), gr.File(label="下載分鏡 CSV")],
    title="PySceneDetect 分鏡偵測(可儲存 CSV)",
    description="上傳影片,自動產生分鏡資訊,並可下載 CSV。支援 CPU 部署。",
)

if __name__ == "__main__":
    iface.launch()
 
1
import gradio as gr
import torch
from transformers import CLIPProcessor, CLIPModel

# Load FunCLIP (assumed here to be a CLIP-derived architecture).
# NOTE(review): swap in the real FunCLIP checkpoint name if it differs.
model_name = "modelscope/funclip"
device = "cpu"

processor = CLIPProcessor.from_pretrained(model_name)
model = CLIPModel.from_pretrained(model_name).to(device)
 
11
 
12
def funclip_inference(image):
    """Encode one image with the (Fun)CLIP vision tower.

    Parameters
    ----------
    image : PIL.Image.Image
        Image supplied by the Gradio ``gr.Image(type="pil")`` input.

    Returns
    -------
    numpy.ndarray
        The raw image-feature embedding, detached and moved to CPU.
    """
    inputs = processor(images=image, return_tensors="pt").to(device)
    # Inference only: no_grad skips autograd bookkeeping, saving memory
    # and time on this CPU-only deployment.
    with torch.no_grad():
        outputs = model.get_image_features(**inputs)
    # TODO: replace with FunCLIP-specific post-processing.
    return outputs.detach().cpu().numpy()
 
17
 
18
+ iface = gr.Interface(fn=funclip_inference, inputs=gr.Image(type="pil"), outputs=gr.Textbox())
19
+ iface.launch()