import os
import argparse
import threading
from concurrent.futures import ThreadPoolExecutor, as_completed

import pandas as pd
from tqdm import tqdm

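# Locks protecting the shared result list and match counter during aggregation.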
video_data_lock = threading.Lock()
matched_count_lock = threading.Lock()


def process_video_file(video_file, args, csv_video_mapping):
    """Process a single video file: match it to a CSV row and build its metadata record."""
    video_path = os.path.join(args.input_video_root, video_file)
    video_filename = os.path.splitext(video_file)[0]

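    # Match this video to a CSV row whose videoFile stem is a prefix of the filename.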
    matched_row = None
    for csv_prefix, row in csv_video_mapping.items():
        if video_filename.startswith(csv_prefix):
            matched_row = row
            break

    result = None
    if matched_row is not None:
        final_csv_path = os.path.join(args.output_csv_path, video_filename + ".csv")

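        # If a per-video CSV already exists and parses cleanly, skip this video;
        # if it is corrupted, delete it so it can be regenerated.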
        if os.path.exists(final_csv_path):
            try:
                pd.read_csv(final_csv_path)
                return None
            except (pd.errors.EmptyDataError, pd.errors.ParserError, UnicodeDecodeError, FileNotFoundError) as e:
                print(f"Warning: CSV file {final_csv_path} is corrupted ({e}). Deleting and will recreate.")
                os.remove(final_csv_path)

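        # Build the metadata record; the videoFile entry is written with a .mp4
        # extension regardless of the container extension found on disk.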
        result = {
            'videoFile': video_filename + ".mp4",
            'cameraFile': matched_row['cameraFile'],
            'location': matched_row['location'],
            'scene': matched_row['scene'],
            'crowdDensity': matched_row['crowdDensity'],
            'weather': matched_row['weather'],
            'timeOfDay': matched_row['timeOfDay'],
        }
    else:
        print(f"Warning: No CSV record found for video file: {video_file}")

    return result


def process_videos_multithreaded(video_files, args, csv_video_mapping, max_workers=4):
    """Process video files concurrently and collect the matched metadata records."""
    video_data = []
    matched_count = 0

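    # Submit one task per video and map each future back to its filename so
    # failures can be attributed to a specific file.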
    with ThreadPoolExecutor(max_workers=max_workers) as executor:
        future_to_video = {
            executor.submit(process_video_file, video_file, args, csv_video_mapping): video_file
            for video_file in video_files
        }

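        # Gather results as the workers finish; the locks serialize updates to
        # the shared list and counter.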
        for future in tqdm(as_completed(future_to_video), total=len(video_files), desc="Processing videos"):
            video_file = future_to_video[future]
            try:
                result = future.result()
                if result is not None:
                    with video_data_lock:
                        video_data.append(result)
                    with matched_count_lock:
                        matched_count += 1
            except Exception as exc:
                print(f'Video {video_file} generated an exception: {exc}')

    return video_data, matched_count


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--input_csv",
        type=str,
        default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking_updated.csv",
    )
    parser.add_argument(
        "--input_video_root",
        type=str,
        default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-386",
    )
    parser.add_argument(
        "--output_csv_path",
        type=str,
        default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/sekai-game-walking-386",
    )
    parser.add_argument(
        "--output_csv_file",
        type=str,
        default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/sekai-game-walking-386.csv",
    )
    parser.add_argument("--num_workers", type=int, default=16)
    args = parser.parse_args()
    return args


if __name__ == "__main__":
    args = parse_args()

    df = pd.read_csv(args.input_csv)

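    # Keep only the metadata columns needed downstream.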
    keep_columns = ['videoFile', 'cameraFile', 'caption', 'location', 'scene', 'crowdDensity', 'weather', 'timeOfDay']
    df = df[keep_columns].copy()

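    # Map each CSV videoFile stem to its row; videos on disk are matched against
    # these stems by prefix in process_video_file().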
    csv_video_mapping = {}
    for idx, row in df.iterrows():
        video_prefix = os.path.splitext(os.path.basename(row['videoFile']))[0]
        csv_video_mapping[video_prefix] = row

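    # Collect all video files directly under the input root, filtered by extension.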
    video_files = []
    for file in os.listdir(args.input_video_root):
        if file.lower().endswith(('.mp4', '.avi', '.mov', '.mkv', '.wmv', '.flv')):
            video_files.append(file)

    video_data, matched_count = process_videos_multithreaded(video_files, args, csv_video_mapping, max_workers=args.num_workers)

    print(f"Successfully matched {matched_count} videos with CSV records")
    print(f"Total video data to process: {len(video_data)}")

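    # Write the aggregated records to the output CSV (an empty CSV if nothing matched).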
    if video_data:
        output_df = pd.DataFrame(video_data)
        output_csv_file = args.output_csv_file
        output_df.to_csv(output_csv_file, index=False)
        print(f"Video data saved to: {output_csv_file}")
        print(f"Saved {len(video_data)} video records")
    else:
        output_df = pd.DataFrame()
        output_csv_file = args.output_csv_file
        output_df.to_csv(output_csv_file, index=False)
        print(f"Empty video data saved to: {output_csv_file}")
        print("No video data to save - created empty CSV file")