syingxi committed on
Commit
1339169
·
verified ·
1 Parent(s): 63b3924

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +8 -2
app.py CHANGED
@@ -10,7 +10,9 @@ from datasets import config
10
 
11
  hf_token = os.environ['hf_token'] # 确保环境变量中有你的令牌
12
 
13
- local_dir = "VBench_sampled_video" # 本地文件夹路径
 
 
14
  # dataset = load_dataset("Vchitect/VBench_sampled_video")
15
  # print(os.listdir("~/.cache/huggingface/datasets/Vchitect___VBench_sampled_video/"))
16
  # root = "~/.cache/huggingface/datasets/Vchitect___VBench_sampled_video/"
@@ -33,8 +35,10 @@ os.makedirs(local_dir, exist_ok=True)
33
  hf_api = HfApi(endpoint="https://huggingface.co", token=hf_token)
34
  hf_api = HfApi(token=hf_token)
35
  repo_id = "Vchitect/VBench_sampled_video"
 
36
 
37
  model_names=[]
 
38
  for i in hf_api.list_repo_tree('Vchitect/VBench_sampled_video',repo_type='dataset'):
39
  model_name = i.path
40
  if '.git' not in model_name and '.md' not in model_name:
@@ -52,6 +56,8 @@ with open("videos_by_dimension.json") as f:
52
  # all_videos = json.load(f)
53
 
54
  types = ['appearance_style', 'color', 'temporal_style', 'spatial_relationship', 'temporal_flickering', 'scene', 'multiple_objects', 'object_class', 'human_action', 'overall_consistency', 'subject_consistency']
 
 
55
 
56
  def get_video_path_local(model_name, type, prompt):
57
  if 'Show-1' in model_name:
@@ -183,7 +189,7 @@ def show_feedback_button():
183
  with gr.Blocks() as interface:
184
  gr.Markdown("# VBench Video Arena")
185
  gr.Markdown("""
186
- **VBench Video Arena: Watch AI-Generated Videos Instantly** (powered by [VBench](https://github.com/Vchitect/VBench) and [VBench Leaderboard](https://huggingface.co/spaces/Vchitect/VBench_Leaderboard))\n
187
  - **Random 2 Videos**: Randomly selects two models to compare on the same ability dimension and text prompt.\n
188
  - **Play Selection** Allows users to choose a model, dimension, and text prompt from drop-down menus and view the corresponding videos. """)
189
 
 
10
 
11
  hf_token = os.environ['hf_token'] # 确保环境变量中有你的令牌
12
 
13
+ local_dir = "VBench-I2V_sampled_video" # 本地文件夹路径
14
+
15
+
16
  # dataset = load_dataset("Vchitect/VBench_sampled_video")
17
  # print(os.listdir("~/.cache/huggingface/datasets/Vchitect___VBench_sampled_video/"))
18
  # root = "~/.cache/huggingface/datasets/Vchitect___VBench_sampled_video/"
 
35
  hf_api = HfApi(endpoint="https://huggingface.co", token=hf_token)
36
  hf_api = HfApi(token=hf_token)
37
  repo_id = "Vchitect/VBench_sampled_video"
38
+ # repo_id = "Vchitect/VBench-I2V_sampled_video"
39
 
40
  model_names=[]
41
+ # for i in hf_api.list_repo_tree('Vchitect/VBench-I2V_sampled_video',repo_type='dataset'):
42
  for i in hf_api.list_repo_tree('Vchitect/VBench_sampled_video',repo_type='dataset'):
43
  model_name = i.path
44
  if '.git' not in model_name and '.md' not in model_name:
 
56
  # all_videos = json.load(f)
57
 
58
  types = ['appearance_style', 'color', 'temporal_style', 'spatial_relationship', 'temporal_flickering', 'scene', 'multiple_objects', 'object_class', 'human_action', 'overall_consistency', 'subject_consistency']
59
+ # types = ['subject_consistency', 'background_consistency', 'motion_smoothness', 'dynamic_degree', 'aesthetic_quality', 'imaging_quality', 'i2v_subject', 'i2v_background', 'camera_motion']
60
+
61
 
62
  def get_video_path_local(model_name, type, prompt):
63
  if 'Show-1' in model_name:
 
189
  with gr.Blocks() as interface:
190
  gr.Markdown("# VBench Video Arena")
191
  gr.Markdown("""
192
+ **VBench-I2V Video Arena: Watch AI-Generated Videos Instantly** (powered by [VBench](https://github.com/Vchitect/VBench) and [VBench Leaderboard](https://huggingface.co/spaces/Vchitect/VBench_Leaderboard))\n
193
  - **Random 2 Videos**: Randomly selects two models to compare on the same ability dimension and text prompt.\n
194
  - **Play Selection** Allows users to choose a model, dimension, and text prompt from drop-down menus and view the corresponding videos. """)
195