| | import os |
| | import pandas as pd |
| | import argparse |
| | from tqdm import tqdm |
| |
|
def extract_uttid_from_video_file(video_file):
    """Derive the uttid from a ``videoFile`` CSV entry.

    Strips a trailing ``.mp4`` extension when present; any other value is
    returned unchanged.
    """
    suffix = '.mp4'
    if video_file.endswith(suffix):
        return video_file[:-len(suffix)]
    return video_file
| |
|
def create_filtered_csv(csv_file, output_latent_folder, output_csv_file):
    """Write a filtered copy of ``csv_file`` containing only unprocessed samples.

    A sample is considered already processed when a latent ``.pt`` file for
    its uttid exists in ``output_latent_folder``. Matching is done purely on
    the uttid derived from the ``videoFile`` column (``.mp4`` suffix removed);
    no other metadata is consulted.

    Args:
        csv_file: Path to the source CSV; must contain a ``videoFile`` column.
        output_latent_folder: Directory holding previously generated ``*.pt``
            latent files. May not exist yet (treated as "nothing processed").
        output_csv_file: Destination path for the filtered CSV.

    Returns:
        int: Number of rows remaining after filtering.
    """
    df = pd.read_csv(csv_file)
    print(f"Original dataset size: {len(df)}")

    # Collect the uttids that already have a latent file on disk.
    existing_files = set()
    if os.path.exists(output_latent_folder):
        for filename in os.listdir(output_latent_folder):
            if not filename.endswith('.pt'):
                continue
            parts = filename[:-3].split('_')
            # Latent filenames appear to be "<uttid>_<a>_<b>_<c>.pt": the last
            # three underscore-separated tokens are metadata and the remainder
            # is the uttid (which may itself contain underscores). Files with
            # fewer than four tokens are silently skipped.
            # TODO(review): confirm this naming scheme against the
            # latent-extraction script.
            if len(parts) >= 4:
                existing_files.add('_'.join(parts[:-3]))

    print(f"Found {len(existing_files)} existing latent files")

    # Keep only rows whose uttid has no latent file yet. Vectorized
    # equivalent of stripping a trailing ".mp4" from each videoFile value.
    df_uttids = df['videoFile'].str.removesuffix('.mp4')
    filtered_df = df[~df_uttids.isin(existing_files)]

    # BUG FIX: os.path.dirname() returns '' for a bare filename and
    # os.makedirs('') raises FileNotFoundError — only create the directory
    # when there actually is one.
    out_dir = os.path.dirname(output_csv_file)
    if out_dir:
        os.makedirs(out_dir, exist_ok=True)
    filtered_df.to_csv(output_csv_file, index=False)

    print(f"Filtered dataset size: {len(filtered_df)}")
    print(f"Filtered CSV saved to: {output_csv_file}")

    return len(filtered_df)
| |
|
def create_all_filtered_csvs():
    """Generate a ``*_filtered.csv`` next to every dataset's ``*_updated.csv``.

    Iterates over the four Sekai datasets, pairing each source CSV with the
    directory holding its already-computed latents, and writes a filtered CSV
    that excludes samples whose latents exist.
    """
    base_csv_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/"
    base_output_latent_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/"

    # (source CSV filename, latent subdirectory) for each dataset.
    datasets = [
        ("sekai-game-walking-193_updated.csv", "sekai-game-walking-193/latents_stride1"),
        ("sekai-real-walking-hq-193_updated.csv", "sekai-real-walking-hq-193/latents_stride1"),
        ("sekai-real-walking-hq-386_updated.csv", "sekai-real-walking-hq-386/latents_stride2"),
        ("sekai-game-walking-386_updated.csv", "sekai-game-walking-386/latents_stride2"),
    ]

    for csv_name, latent_subdir in datasets:
        source_csv = os.path.join(base_csv_path, csv_name)
        latent_folder = os.path.join(base_output_latent_path, latent_subdir)
        target_csv = os.path.join(
            base_csv_path, csv_name.replace('_updated.csv', '_filtered.csv')
        )

        print(f"\nProcessing: {csv_name}")

        sample_count = create_filtered_csv(
            csv_file=source_csv,
            output_latent_folder=latent_folder,
            output_csv_file=target_csv,
        )

        print(f"Created filtered CSV: {target_csv} with {sample_count} samples")
| |
|
def main():
    """CLI entry point: create filtered CSVs for all Sekai datasets."""
    parser = argparse.ArgumentParser(description="Create filtered CSV for processing")
    parser.add_argument("--batch", action="store_true", help="Process all datasets in batch")
    # NOTE(review): --batch is accepted but has no effect — the batch run
    # happens unconditionally below. The flag is kept so existing invocations
    # with --batch keep working; drop the dead `args` binding (it was unused).
    parser.parse_args()
    create_all_filtered_csvs()
| |
|
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| | |
| |
|
| | if __name__ == "__main__": |
| | main() |
| |
|