# useful_code/dataset_code/spatialvid/add_config_step0.py
# (uploaded by SuperCS with the upload-large-folder tool, revision e051419)
import os
import time
from concurrent.futures import ThreadPoolExecutor, as_completed
from fractions import Fraction
from pathlib import Path
from threading import Lock

import cv2
import ffmpeg
import pandas as pd
class VideoProcessor:
    """Probe video files listed in a CSV and record frame count / size / fps.

    Uses ffmpeg's container metadata (no frame decoding) and a thread pool,
    since the work is I/O-bound. Shared progress counters are guarded by a
    lock so worker threads can report a consistent "[done/total]" line.
    """

    def __init__(self, max_workers=4):
        # Default thread-pool size; can be overridden per process_video_csv call.
        self.max_workers = max_workers
        # Protects the two shared progress counters below.
        self.progress_lock = Lock()
        self.processed_count = 0
        self.total_count = 0

    def get_video_properties(self, video_path):
        """Return (num_frames, height, width, fps) for *video_path*.

        Returns (None, None, None, None) when the file has no video stream
        or probing fails for any reason (error is printed, not raised).
        """
        try:
            probe = ffmpeg.probe(video_path)
            video_stream = next((stream for stream in probe['streams']
                                 if stream['codec_type'] == 'video'), None)
            if not video_stream:
                return None, None, None, None
            width = int(video_stream['width'])
            height = int(video_stream['height'])
            # r_frame_rate is a rational string like "30000/1001"; parse it
            # with Fraction instead of eval()-ing metadata read from an
            # external file (same numeric result, no code execution).
            fps = float(Fraction(video_stream['r_frame_rate']))
            if 'nb_frames' in video_stream:
                num_frames = int(video_stream['nb_frames'])
            else:
                # Some containers omit nb_frames; estimate from duration.
                duration = float(probe['format']['duration'])
                num_frames = int(duration * fps)
            return num_frames, height, width, fps
        except Exception as e:
            print(f"读取视频 {video_path} 时出错: {str(e)}")
            return None, None, None, None

    def process_single_video(self, args):
        """Probe one video and update the shared progress counters.

        Args:
            args: (idx, video_file, video_dir) — idx is the DataFrame row
                index the result will be written back to.

        Returns:
            (idx, num_frame, height, width, fps, success, message)
        """
        idx, video_file, video_dir = args
        video_path = os.path.join(video_dir, video_file)
        # Missing file: fail fast without touching the progress counter
        # (only files that were actually probed are counted).
        if not os.path.exists(video_path):
            message = f"视频文件不存在: {video_path}"
            return idx, None, None, None, None, False, message
        num_frame, height, width, fps = self.get_video_properties(video_path)
        with self.progress_lock:
            self.processed_count += 1
            progress = (self.processed_count / self.total_count) * 100
            if height is not None:
                message = f"[{self.processed_count}/{self.total_count}] ({progress:.1f}%) {video_file}{num_frame}, {width}x{height}, {fps:.2f}fps"
                success = True
                # Round only the returned value; the message above already
                # formats fps to two decimals.
                fps = round(fps, 2)
            else:
                message = f"[{self.processed_count}/{self.total_count}] ({progress:.1f}%) {video_file} → 获取信息失败"
                success = False
            # Printing under the lock keeps the progress lines ordered.
            print(message)
        return idx, num_frame, height, width, fps, success, message

    def process_video_csv(self, csv_path, video_dir="./", output_csv_path=None, max_workers=None):
        """Probe every video listed in *csv_path* and save an augmented CSV.

        Args:
            csv_path: input CSV; must contain a 'video path' column with
                paths relative to *video_dir*.
            video_dir: directory prefixed to each 'video path' entry.
            output_csv_path: where to write the augmented CSV; overwrites
                *csv_path* when None.
            max_workers: thread-pool size; defaults to the constructor value.

        Returns:
            The DataFrame with new_num_frame / new_height / new_width /
            new_fps columns added (rows that failed to probe keep None).
        """
        if max_workers is None:
            max_workers = self.max_workers
        df = pd.read_csv(csv_path)
        self.total_count = len(df)
        self.processed_count = 0
        print(f"成功读取CSV文件,共 {len(df)} 行数据")
        print(f"使用 {max_workers} 个线程进行处理...")
        # New columns start as None; successful rows are filled in below.
        df['new_num_frame'] = None
        df['new_height'] = None
        df['new_width'] = None
        df['new_fps'] = None
        tasks = [(idx, row['video path'], video_dir) for idx, row in df.iterrows()]
        start_time = time.time()
        with ThreadPoolExecutor(max_workers=max_workers) as executor:
            future_to_task = {executor.submit(self.process_single_video, task): task for task in tasks}
            for future in as_completed(future_to_task):
                idx, num_frame, height, width, fps, success, message = future.result()
                if success and height is not None:
                    df.at[idx, 'new_num_frame'] = num_frame
                    df.at[idx, 'new_height'] = height
                    df.at[idx, 'new_width'] = width
                    df.at[idx, 'new_fps'] = fps
        processing_time = time.time() - start_time
        if output_csv_path is None:
            output_csv_path = csv_path
        df.to_csv(output_csv_path, index=False)
        valid_videos = df['new_height'].notna().sum()
        print(f"\n{'='*60}")
        print(f"处理完成!")
        print(f"总处理时间: {processing_time:.2f}秒")
        # Guard against an empty CSV (division by zero on the average line).
        if len(df) > 0:
            print(f"平均每个视频: {processing_time/len(df):.2f}秒")
        print(f"成功处理视频数量: {valid_videos}/{len(df)}")
        print(f"结果已保存到: {output_csv_path}")
        print(f"{'='*60}")
        return df
def process_video_csv_multithread(csv_path, video_dir="./", output_csv_path=None, max_workers=4):
    """Convenience wrapper: build a VideoProcessor and run one CSV pass.

    Args:
        csv_path: input CSV file path.
        video_dir: directory containing the video files.
        output_csv_path: output CSV path (None overwrites the input file).
        max_workers: thread-pool size.

    Returns:
        The augmented DataFrame from VideoProcessor.process_video_csv.
    """
    worker = VideoProcessor(max_workers=max_workers)
    return worker.process_video_csv(csv_path, video_dir, output_csv_path, max_workers)
# Usage example
if __name__ == "__main__":
    # NOTE(review): base_name is never interpolated into the paths below, so
    # every iteration processes the same hard-coded CSV — confirm intent.
    base_names = ["sekai-game-walking-386"]
    for base_name in base_names:
        csv_file_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/data/train/SpatialVID_HQ_metadata.csv"
        video_directory = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final"
        output_file_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/SpatialVID/SpatialVID-HQ-Final/data/SpatialVID_HQ_step0.csv"
        thread_count = 192

        result_df = process_video_csv_multithread(
            csv_path=csv_file_path,
            video_dir=video_directory,
            output_csv_path=output_file_path,
            max_workers=thread_count
        )

        # Preview the first rows of the augmented data.
        if result_df is not None:
            print("\n处理后的数据预览:")
            # Fix: the pipeline keys rows by the 'video path' column (see
            # process_video_csv); the previous 'videoFile' column does not
            # exist in this CSV and raised KeyError here.
            print(result_df[['video path', 'new_num_frame', 'new_height', 'new_width', 'new_fps']].head())
            # Resolution distribution summary.
            print(f"\n视频分辨率统计:")
            resolution_stats = result_df.groupby(['new_width', 'new_height']).size().reset_index(name='count')
            print(resolution_stats.head(10))