import os
import argparse

import pandas as pd

def extract_uttid_from_video_file(video_file):
    """
    从videoFile列中提取uttid(去掉.mp4后缀)
    """
    if video_file.endswith('.mp4'):
        return video_file[:-4]  # 去掉.mp4
    return video_file
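
# Example behavior of the helper above (hypothetical filenames):
#   extract_uttid_from_video_file("scene_0001.mp4")  -> "scene_0001"
#   extract_uttid_from_video_file("scene_0001.webm") -> "scene_0001.webm"  # non-.mp4 passes through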

def create_filtered_csv(csv_file, output_latent_folder, output_csv_file):
    """
    创建一个过滤后的CSV文件,只包含需要处理的样本
    只使用uttid匹配,不依赖其他元数据
    """
    # 读取原始CSV
    df = pd.read_csv(csv_file)
    print(f"Original dataset size: {len(df)}")
    
    # Collect the uttids of latent files that already exist.
    # Latent filenames are assumed to follow "{uttid}_{m1}_{m2}_{m3}.pt": the
    # last three underscore-separated fields are metadata, and everything
    # before them (which may itself contain underscores) is the uttid.
    existing_files = set()
    if os.path.exists(output_latent_folder):
        for filename in os.listdir(output_latent_folder):
            if filename.endswith('.pt'):
                parts = filename[:-3].split('_')  # strip ".pt", split on "_"
                if len(parts) >= 4:  # at least uttid + 3 metadata fields
                    uttid = '_'.join(parts[:-3])
                    existing_files.add(uttid)
    
    print(f"Found {len(existing_files)} existing latent files")
    
    # Keep only the rows whose uttid has no existing latent file
    df_uttids = df['videoFile'].apply(extract_uttid_from_video_file)
    mask = ~df_uttids.isin(existing_files)
    filtered_df = df[mask]

    # Save the filtered rows to a new CSV file
    output_dir = os.path.dirname(output_csv_file)
    if output_dir:  # dirname is "" when the path has no directory component
        os.makedirs(output_dir, exist_ok=True)
    filtered_df.to_csv(output_csv_file, index=False)
    
    print(f"Filtered dataset size: {len(filtered_df)}")
    print(f"Filtered CSV saved to: {output_csv_file}")
    
    return len(filtered_df)

def create_all_filtered_csvs():
    """
    为所有数据集创建过滤后的CSV文件
    """
    base_csv_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/"
    base_output_latent_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/"

    csv_paths = [
        "sekai-game-walking-193_updated.csv", 
        "sekai-real-walking-hq-193_updated.csv", 
        "sekai-real-walking-hq-386_updated.csv", 
        "sekai-game-walking-386_updated.csv"
    ]
    output_latent_paths = [
        "sekai-game-walking-193/latents_stride1", 
        "sekai-real-walking-hq-193/latents_stride1", 
        "sekai-real-walking-hq-386/latents_stride2", 
        "sekai-game-walking-386/latents_stride2"
    ]

    for csv_path, output_latent_path in zip(csv_paths, output_latent_paths):
        original_csv = os.path.join(base_csv_path, csv_path)
        output_latent_folder = os.path.join(base_output_latent_path, output_latent_path)
        
        # Derive the filtered CSV filename from the original name
        filtered_csv_name = csv_path.replace('_updated.csv', '_filtered.csv')
        filtered_csv_path = os.path.join(base_csv_path, filtered_csv_name)
        
        print(f"\nProcessing: {csv_path}")
        
        filtered_count = create_filtered_csv(
            csv_file=original_csv,
            output_latent_folder=output_latent_folder,
            output_csv_file=filtered_csv_path
        )
        
        print(f"Created filtered CSV: {filtered_csv_path} with {filtered_count} samples")

def main():
    parser = argparse.ArgumentParser(description="Create filtered CSV for processing")
    parser.add_argument("--csv_file", type=str, help="Original CSV file path")
    parser.add_argument("--output_latent_folder", type=str, help="Output latent folder path")
    parser.add_argument("--output_csv_file", type=str, help="Output filtered CSV file path")
    parser.add_argument("--batch", action="store_true", help="Process all datasets in batch")

    args = parser.parse_args()

    if args.batch:
        # Batch mode: process all configured datasets
        create_all_filtered_csvs()
    else:
        # Single mode: all three path arguments are required
        if not all([args.csv_file, args.output_latent_folder, args.output_csv_file]):
            print("Error: For single processing, --csv_file, --output_latent_folder, and --output_csv_file are required")
            return

        filtered_count = create_filtered_csv(
            csv_file=args.csv_file,
            output_latent_folder=args.output_latent_folder,
            output_csv_file=args.output_csv_file
        )

        if filtered_count == 0:
            print("No samples need processing!")
        else:
            print(f"Ready to process {filtered_count} samples")

if __name__ == "__main__":
    main()
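
# Example invocations (the script filename below is an assumption; the paths
# are hypothetical):
#   python create_filtered_csv.py --batch
#   python create_filtered_csv.py \
#       --csv_file yamls/dataset_updated.csv \
#       --output_latent_folder dataset/latents_stride1 \
#       --output_csv_file yamls/dataset_filtered.csv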