from vllm import LLM, SamplingParams
import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader, Dataset
import argparse
import os
from typing import Tuple
import qwen_vl_utils
from qwen_vl_utils import process_vision_info
from tqdm import tqdm
from transformers import AutoProcessor
from video_reader import PyVideoReader
os.environ["TOKENIZERS_PARALLELISM"] = "false"
input_prompt = (
"Please generate a comprehensive caption for the following video, describing various aspects, including but not limited to: "
"1. The main theme and setting of the image (such as location, time of day, weather conditions, etc.) "
"2. Key objects and their characteristics (such as color, shape, size, etc.) "
"3. Relationships and interactions between objects (such as positioning, actions, etc.) "
"4. Any people present and their emotions or activities (such as expressions, postures, etc.) "
"5. Background and environmental details (such as architecture, natural scenery, etc.) "
"6. Motion of the Subject: The movement of people or objects in the video. Use verbs that describe movement. "
"7. Camera motion control: zoom in, zoom out, push in, pull out, pan right, pan left, truck right, truck left, tilt up, tilt down, pedestal up, pedestal down, arc shot, tracking shot, static shot, and handheld shot. "
    'Do not describe imagined content. Only describe what can be determined from the video. Avoid listing things. Do not use abstract concepts (love, hate, justice, infinity, joy) as subjects. Use concrete nouns (human, cup, dog, planet, headphones) for more accurate results. Use verbs to describe the movement and changes of the subject or people. Write your prompts in plain, conversational language. Start your description directly with the main subject, typically a noun. Do not use "\\n", subheadings, or titles. '
"For guidance on the expected output format and content length, refer to the provided examples:"
"The video begins with the viewer moving forward along a rocky path surrounded by dense greenery under a clear blue sky. The camera smoothly pans to reveal a signpost on the left, indicating a trailhead, before continuing along the uneven terrain dotted with shrubs and small trees. As the journey progresses, the path ascends slightly, leading to a set of wooden steps that navigate through the lush vegetation. The camera angle shifts subtly to capture the ascent, highlighting the natural textures of the rocks and foliage. Upon reaching the top, the scene opens up to a breathtaking view of Castle Rock Beach, with the vast ocean stretching out to the horizon and a prominent rock formation standing tall against the backdrop of the sea. The camera then pans back to the trail, showing more steps and the surrounding forested area, emphasizing the serene and untouched beauty of the location. The sunlight bathes the entire landscape in warm hues, casting sharp shadows and enhancing the vivid greens and earthy tones of the environment. The video concludes with the camera moving steadily along the trail, capturing the intricate details of the natural surroundings and the tranquil atmosphere of this remote coastal setting. "
"Attention: #######. Please describe the content of the video and the changes that occur, in chronological order:"
)
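# Custom video reader: uniformly samples a fixed number of frames with PyVideoReader
# and returns them as a TCHW tensor together with the effective sampling fps.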
def _read_video_decord_cus(
ele: dict,
) -> Tuple[torch.Tensor, float]:
vr = PyVideoReader(ele["video"], threads=0)
# crop video
# s_x, e_x, s_y, e_y = ele["crop"]
# sample video
# total_frames = ele["video_end"] - ele["video_start"]
# _, video_fps = len(vr), vr.get_avg_fps()
total_frames, video_fps = len(vr), vr.get_fps()
nframes = 32
# nframes = qwen_vl_utils.vision_process.smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
idx = np.linspace(0, total_frames - 1, nframes).round().astype(int).tolist()
# idx = [i + ele["video_start"] for i in idx]
video = vr.decode()[idx]
# video = vr.get_batch(idx).asnumpy()
video = torch.tensor(video).permute(0, 3, 1, 2) # Convert to TCHW format
# video = video[:, :, s_y:e_y, s_x:e_x]
sample_fps = nframes / max(total_frames, 1e-6) * video_fps
    del vr  # release the video reader before returning
return video, sample_fps
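# Point qwen_vl_utils' "decord" video reader backend at the PyVideoReader-based
# sampler defined above, so video decoding in process_vision_info goes through it.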
qwen_vl_utils.vision_process.VIDEO_READER_BACKENDS = {
"decord": _read_video_decord_cus,
}
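# Dataset that pairs each video with its metadata, renders the chat prompt through
# the processor's chat template, and packages vLLM-ready multimodal inputs.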
class CaptionData(Dataset):
def __init__(self, video_data, input_video_root, output_json_folder, processor):
super().__init__()
self.input_video_root = input_video_root
self.output_json_folder = output_json_folder
vid_paths = [i["path"] for i in video_data]
video_keys = [i["video_key"] for i in video_data]
cameraFiles = [i["cameraFile"] for i in video_data]
locations = [i["location"] for i in video_data]
scenes = [i["scene"] for i in video_data]
crowdDensitys = [i["crowdDensity"] for i in video_data]
weathers = [i["weather"] for i in video_data]
timeOfDays = [i["timeOfDay"] for i in video_data]
save_paths = [
os.path.join(output_json_folder, (i["video_key"] + ".csv"))
for i in video_data
]
print("part x origin num", len(save_paths))
self.paths = [
[save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay]
for save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in zip(
save_paths, vid_paths, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays
)
]
print("part x need to process num", len(self.paths))
self.processor = processor
def __len__(self):
return len(self.paths)
def load_video(self, path, location, scene, crowdDensity, weather, timeOfDay):
useful_message = f"here is some auxiliary information about the video, the location is {location}, the scene is {scene}, the crowdDensity is {crowdDensity}, the weather is {weather}, the timeOfDay is {timeOfDay}."
messages = [
{
"role": "user",
"content": [
{
"type": "video",
"video": path,
# "total_pixels": 20480 * 28 * 28,
"min_pixels": 16 * 28 * 28,
# "max_pixels": 512 * 512,
"fps": 1.0,
# "video_start": cut[0],
# "video_end": cut[1],
# "crop": crop,
},
{"type": "text", "text": input_prompt.replace("#######", useful_message)},
],
}
]
# Preparation for inference
text = self.processor.apply_chat_template(
messages, tokenize=False, add_generation_prompt=True
)
image_inputs, video_inputs = process_vision_info(messages)
mm_data = {}
if image_inputs is not None:
mm_data["image"] = image_inputs
if video_inputs is not None:
mm_data["video"] = video_inputs
inputs = {
"prompt": text,
"multi_modal_data": mm_data,
}
return inputs
def wrapper(self, index):
save_path, video_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.paths[index]
inputs = [self.load_video(video_path, location, scene, crowdDensity, weather, timeOfDay)]
return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay
def __getitem__(self, index):
try:
save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.wrapper(index)
return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay
except Exception as e:
print("error", e)
return False, False, False
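# With batch_size fixed at 1, the collate function simply unzips the single sample;
# samples that failed to load arrive as all-False sentinels and are skipped downstream.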
def collate_fn(batch):
save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays = zip(*batch)
inputs = inputs[0]
if not inputs:
return False, False, False, False, False, False, False, False, False
return save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument(
"--model_id_or_path",
type=str,
default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Qwen/Qwen2.5-VL-7B-Instruct/",
)
parser.add_argument("--batch_size", type=int, default=1)
parser.add_argument(
"--input_csv",
type=str,
default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/test.csv",
)
parser.add_argument(
"--input_video_root", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-game-walking-193"
)
parser.add_argument(
"--output_csv_path",
type=str,
default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/test-193",
)
parser.add_argument("--num_workers", type=int, default=0)
parser.add_argument("--part", type=int, default=0)
parser.add_argument("--total_part", type=int, default=1)
args = parser.parse_args()
return args
def main(args, llm):
assert args.batch_size == 1
model_id_or_path = args.model_id_or_path
processor = AutoProcessor.from_pretrained(model_id_or_path)
    # Read and preprocess the input CSV
df = pd.read_csv(args.input_csv)
keep_columns = ['videoFile', 'cameraFile', 'location', 'scene', 'crowdDensity', 'weather', 'timeOfDay']
df = df[keep_columns].copy()
    # Build full video paths and video keys
video_files = df['videoFile'].values
paths = np.array([os.path.join(args.input_video_root, f) for f in video_files])
video_keys = np.array([os.path.splitext(os.path.basename(f))[0] for f in video_files])
    # Add the new columns to the dataframe
df['path'] = paths
df['video_key'] = video_keys
    # Convert to a list of dicts
video_data = df.to_dict('records')
print(f"总共构建了 {len(video_data)} 个视频数据项")
if len(video_data) == 0:
print("Finish: no data need to be processed!")
return
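    # Shard the work across processes: this instance handles every total_part-th
    # item starting at index `part`.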
video_data = video_data[args.part :: args.total_part]
data = CaptionData(
video_data, args.input_video_root, args.output_csv_path, processor
)
loader = DataLoader(
data,
batch_size=args.batch_size,
num_workers=args.num_workers,
pin_memory=False,
prefetch_factor=2 if args.num_workers > 0 else None,
shuffle=False,
drop_last=False,
collate_fn=collate_fn,
)
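    # Near-greedy decoding with a mild repetition penalty keeps captions deterministic
    # and discourages repeated phrases.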
sampling_params = SamplingParams(
temperature=0.1,
top_p=0.001,
# top_k=1,
repetition_penalty=1.05,
max_tokens=512,
)
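    # One video per iteration: skip broken or already-captioned samples, run vLLM
    # generation, and write a single-row CSV into the output folder.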
for save_paths, frames, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in tqdm(loader):
if not save_paths:
print(f"{save_paths} is broking")
continue
if os.path.exists(save_paths[0]):
print(f"{save_paths} is already exists")
continue
if len(save_paths[0]) > 255:
print("Name too long, skipping :", save_paths[0])
continue
folder, filename = os.path.split(save_paths[0])
os.makedirs(folder, exist_ok=True)
try:
results = []
for inputs in frames:
with torch.inference_mode():
outputs = llm.generate([inputs], sampling_params=sampling_params)
generated_text = outputs[0].outputs[0].text
results.append(generated_text)
            df = pd.DataFrame(
                {
                    "videoFile": f"{video_key[0]}.mp4",
                    "cameraFile": cameraFile[0],
                    "caption": results[0].replace("\n", " ").replace("\r", " "),
                    "location": location[0],
                    "scene": scene[0],
                    "crowdDensity": crowdDensity[0],
                    "weather": weather[0],
                    "timeOfDay": timeOfDay[0],
                },
                index=[0],
            )
output_path = save_paths[0]
df.to_csv(f"{output_path}", index=False)
except Exception as e:
print(f"Error processing: {e}")
print("Done")
if __name__ == "__main__":
# os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
args = parse_args()
args.model_id_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Qwen/Qwen2.5-VL-7B-Instruct/"
llm = LLM(
args.model_id_or_path,
# max_model_len=32768 if process_vision_info is None else 4096,
# tensor_parallel_size=2,
# distributed_executor_backend="mp",
gpu_memory_utilization=0.95
)
main(args, llm)
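# Example invocation (the script name and paths are illustrative placeholders, not
# values from this repository; adjust them to your environment):
#   python caption_videos.py \
#       --input_csv /path/to/videos.csv \
#       --input_video_root /path/to/video_folder \
#       --output_csv_path /path/to/output_folder \
#       --part 0 --total_part 8
# Note that --model_id_or_path is currently overridden in the __main__ block above.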