from vllm import LLM, SamplingParams

import numpy as np
import pandas as pd
import torch
from torch.utils.data import DataLoader, Dataset

import argparse
import io
import logging
import os
import time
from typing import Tuple

import keye_vl_utils
from keye_vl_utils import process_vision_info
from tqdm import tqdm
from transformers import AutoProcessor

from video_reader import PyVideoReader

os.environ["TOKENIZERS_PARALLELISM"] = "false"

logger = logging.getLogger(__name__)

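# Captioning instruction sent with every video; the "#######" placeholder is replaced
# per sample with the auxiliary metadata string built in CaptionData.load_video().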
input_prompt = (
    "Please generate a comprehensive caption for the following video, describing various aspects, including but not limited to: "
    "1. The main theme and setting of the image (such as location, time of day, weather conditions, etc.) "
    "2. Key objects and their characteristics (such as color, shape, size, etc.) "
    "3. Relationships and interactions between objects (such as positioning, actions, etc.) "
    "4. Any people present and their emotions or activities (such as expressions, postures, etc.) "
    "5. Background and environmental details (such as architecture, natural scenery, etc.) "
    "6. Motion of the Subject: The movement of people or objects in the video. Use verbs that describe movement. "
    "7. Camera motion control: zoom in, zoom out, push in, pull out, pan right, pan left, truck right, truck left, tilt up, tilt down, pedestal up, pedestal down, arc shot,  tracking shot, static shot, and handheld shot. "
    'Do not describe imagined content. Only describe what can be determined from the video. Avoid listing things. Do not use abstract concepts (love, hate, justice, infinity, joy) as subjects. Use concrete nouns (human, cup, dog, planet, headphones) for more accurate results. Use verbs to describe the movement and changes of the subject or people. Write your prompts in plain, conversational language. Start your description directly with the main subject, typically a noun. Without "\n", subheading and title. '
    "For guidance on the expected output format and content length, refer to the provided examples:"
    "The video begins with the viewer moving forward along a rocky path surrounded by dense greenery under a clear blue sky. The camera smoothly pans to reveal a signpost on the left, indicating a trailhead, before continuing along the uneven terrain dotted with shrubs and small trees. As the journey progresses, the path ascends slightly, leading to a set of wooden steps that navigate through the lush vegetation. The camera angle shifts subtly to capture the ascent, highlighting the natural textures of the rocks and foliage. Upon reaching the top, the scene opens up to a breathtaking view of Castle Rock Beach, with the vast ocean stretching out to the horizon and a prominent rock formation standing tall against the backdrop of the sea. The camera then pans back to the trail, showing more steps and the surrounding forested area, emphasizing the serene and untouched beauty of the location. The sunlight bathes the entire landscape in warm hues, casting sharp shadows and enhancing the vivid greens and earthy tones of the environment. The video concludes with the camera moving steadily along the trail, capturing the intricate details of the natural surroundings and the tranquil atmosphere of this remote coastal setting. "
    "Attention: #######. Please describe the content of the video and the changes that occur, in chronological order:"
)

# def _read_video_decord_cus(
#     ele: dict,
# ) -> Tuple[torch.Tensor, float]:
#     vr = PyVideoReader(ele["video"], threads=0)
#     # crop video
#     # s_x, e_x, s_y, e_y = ele["crop"]
#     # sample video
#     # total_frames = ele["video_end"] - ele["video_start"]
#     # _, video_fps = len(vr), vr.get_avg_fps()
#     total_frames, video_fps = len(vr), vr.get_fps()
#     nframes = 32
#     # nframes = keye_vl_utils.vision_process.smart_nframes(ele, total_frames=total_frames, video_fps=video_fps)
#     idx = np.linspace(0, total_frames - 1, nframes).round().astype(int).tolist()
#     # idx = [i + ele["video_start"] for i in idx]
#     video = vr.decode()[idx]
#     # video = vr.get_batch(idx).asnumpy()
#     video = torch.tensor(video).permute(0, 3, 1, 2)  # Convert to TCHW format
#     # video = video[:, :, s_y:e_y, s_x:e_x]
#     sample_fps = nframes / max(total_frames, 1e-6) * video_fps
#     vr = None
#     del vr
#     return video, sample_fps

def _read_video_decord_cus(
        ele: dict,
) -> Tuple[torch.Tensor, torch.Tensor, list]:
    """read video using decord.VideoReader

    Args:
        ele (dict): a dict containing the configuration of the video.
        Supported keys:
            - video: the path of video. support "file://", "http://", "https://" and local path.
            - video_start: the start time of video.
            - video_end: the end time of video.
    Returns:
        Tuple[torch.Tensor, torch.Tensor, list]: the video tensor with shape (T, C, H, W),
        the per-frame timestamps, and the per-frame slow/fast type labels.
    """
    import decord
    st = time.time()
    if isinstance(ele["video"], bytes):
        video_path = ""
        fp = io.BytesIO(ele["video"])
        vr = decord.VideoReader(fp)
    else:
        video_path = ele["video"]
        vr = decord.VideoReader(video_path)
    # TODO: support start_pts and end_pts
    if 'video_start' in ele or 'video_end' in ele:
        raise NotImplementedError("not support start_pts and end_pts in decord for now.")
    nframes, video_fps = len(vr), vr.get_avg_fps()
    # timestamp start from 0.0
    timestamps = torch.FloatTensor([(1 / video_fps) * i for i in range(nframes)])

    # final_nframes = smart_nframes(ele, total_frames=nframes, video_fps=video_fps)
    # Uniformly sample a fixed number of frames across the clip, e.g. for a 300-frame
    # video: torch.linspace(0, 299, 32).round() -> 0, 10, 19, ..., 289, 299.
    final_nframes = 32
    indices = torch.linspace(0, nframes - 1, final_nframes).round().long()

    frames = vr.get_batch(indices.tolist()).asnumpy()
    frames = torch.tensor(frames).permute(0, 3, 1, 2)  # THWC -> TCHW
    logger.debug(f"Decord: {video_path=}, {nframes=}, {video_fps=}, time={time.time() - st:.3f}s")
    timestamps = timestamps[indices]

    ##### extract key frames start ######
    # extract_slow_fast_frames and MIN_FRAME_SIMILARITY are not defined in this file;
    # they are assumed to be provided by keye_vl_utils.vision_process.
    threshold = ele.get("min_frame_similarity", MIN_FRAME_SIMILARITY)
    frame_types = extract_slow_fast_frames(frames, threshold)
    ##### extract key frames end ######
    logger.debug(f"Read video:  {video_path=}, {nframes=}, {video_fps=}, time={time.time() - st:.3f}s")

    return frames, timestamps, frame_types

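# Monkey-patch keye_vl_utils so that process_vision_info() uses the custom reader above
# (fixed 32-frame uniform sampling) whenever the "decord" backend is selected.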
keye_vl_utils.vision_process.VIDEO_READER_BACKENDS = {
    "decord": _read_video_decord_cus,
}


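# Dataset that pairs each video with its metadata row and builds the prompt + vision
# inputs expected by vLLM's generate(); one sample per video.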
class CaptionData(Dataset):
    def __init__(self, video_data, input_video_root, output_json_folder, processor):
        super().__init__()
        self.input_video_root = input_video_root
        self.output_json_folder = output_json_folder

        vid_paths = [i["path"] for i in video_data]
        video_keys = [i["video_key"] for i in video_data]
        cameraFiles = [i["cameraFile"] for i in video_data]
        locations = [i["location"] for i in video_data]
        scenes = [i["scene"] for i in video_data]
        crowdDensitys = [i["crowdDensity"] for i in video_data]
        weathers = [i["weather"] for i in video_data]
        timeOfDays = [i["timeOfDay"] for i in video_data]
        save_paths = [
            os.path.join(output_json_folder, (i["video_key"] + ".csv"))
            for i in video_data
        ]
        print("part x origin num", len(save_paths))
        self.paths = [
            [save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay]
            for save_path, vid_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in zip(
                save_paths, vid_paths, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays
            )
        ]
        print("part x need to process num", len(self.paths))

        self.processor = processor

    def __len__(self):
        return len(self.paths)

    def load_video(self, path, location, scene, crowdDensity, weather, timeOfDay):
        useful_message = f"here is some auxiliary information about the video, the location is {location}, the scene is {scene}, the crowdDensity is {crowdDensity}, the weather is {weather}, the timeOfDay is {timeOfDay}."
        messages = [
            {
                "role": "user",
                "content": [
                    {
                        "type": "video",
                        "video": path,
                        # "total_pixels": 20480 * 28 * 28,
                        # "min_pixels": 16 * 28 * 28,
                        # "max_pixels": 512 * 512,
                        # "fps": 1.0,
                        # "video_start": cut[0],
                        # "video_end": cut[1],
                        # "crop": crop,
                    },
                    {"type": "text", "text": input_prompt.replace("#######", useful_message)},
                ],
            }
        ]
        # Preparation for inference
        text = self.processor.apply_chat_template(
            messages, tokenize=False, add_generation_prompt=True
        )
        image_inputs, video_inputs, video_kwargs = process_vision_info(messages)

        mm_data = {}
        if image_inputs is not None:
            mm_data["image"] = image_inputs
        if video_inputs is not None:
            mm_data["video"] = video_inputs

        inputs = {
            "prompt": text,
            "multi_modal_data": mm_data,
            # FPS will be returned in video_kwargs
            "mm_processor_kwargs": video_kwargs,
        }

        return inputs

    def wrapper(self, index):
        save_path, video_path, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.paths[index]
        inputs = [self.load_video(video_path, location, scene, crowdDensity, weather, timeOfDay)]
        return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay

    def __getitem__(self, index):
        try:
            save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay = self.wrapper(index)
            return save_path, inputs, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay
        except Exception as e:
            print("error", e)
            # One False per field so collate_fn can still unpack the failed sample.
            return False, False, False, False, False, False, False, False, False


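# batch_size is fixed to 1 (asserted in main), so the collate function simply unwraps the
# single sample; a sample that failed in __getitem__ arrives as False sentinels.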
def collate_fn(batch):
    save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays = zip(*batch)
    inputs = inputs[0]
    if not inputs:
        return False, False, False, False, False, False, False, False, False
    return save_paths, inputs, video_keys, cameraFiles, locations, scenes, crowdDensitys, weathers, timeOfDays


def parse_args():
    parser = argparse.ArgumentParser()
    parser.add_argument(
        "--model_id_or_path",
        type=str,
        default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Qwen/Qwen2.5-VL-7B-Instruct/",
    )
    parser.add_argument("--batch_size", type=int, default=1)
    parser.add_argument(
        "--input_csv",
        type=str,
        default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/temp_input_csv/test.csv",
    )
    parser.add_argument(
        "--input_video_root", type=str, default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/sekai-real-walking-hq-193"
    )
    parser.add_argument(
        "--output_csv_path",
        type=str,
        default="/mnt/bn/yufan-dev-my/ysh/Ckpts/Lixsp11/0_final_sekai_dataset/yamls/test-193",
    )
    parser.add_argument("--num_workers", type=int, default=0)
    parser.add_argument("--part", type=int, default=0)
    parser.add_argument("--total_part", type=int, default=1)
    args = parser.parse_args()
    return args


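# Read the input CSV, shard it by --part/--total_part, caption each video with the LLM,
# and write one small CSV (caption + metadata) per video into output_csv_path.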
def main(args, llm, sampling_params):
    assert args.batch_size == 1

    model_id_or_path = args.model_id_or_path
    processor = AutoProcessor.from_pretrained(model_id_or_path, trust_remote_code=True)

    # Read and preprocess the input CSV
    df = pd.read_csv(args.input_csv)
    keep_columns = ['videoFile', 'cameraFile', 'location', 'scene', 'crowdDensity', 'weather', 'timeOfDay']
    df = df[keep_columns].copy()

    # Build full video paths and video keys in batch
    video_files = df['videoFile'].values
    paths = np.array([os.path.join(args.input_video_root, f) for f in video_files])
    video_keys = np.array([os.path.splitext(os.path.basename(f))[0] for f in video_files])

    # Add the new columns
    df['path'] = paths
    df['video_key'] = video_keys

    # Convert to a list of dicts
    video_data = df.to_dict('records')
    print(f"总共构建了 {len(video_data)} 个视频数据项")

    video_data = video_data[args.part :: args.total_part]
    data = CaptionData(
        video_data, args.input_video_root, args.output_csv_path, processor
    )
    loader = DataLoader(
        data,
        batch_size=args.batch_size,
        num_workers=args.num_workers,
        pin_memory=False,
        prefetch_factor=2 if args.num_workers > 0 else None,
        shuffle=False,
        drop_last=False,
        collate_fn=collate_fn,
    )

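    # One video per iteration: skip broken samples, already-captioned videos, and paths
    # that exceed the filesystem name limit, then write a single-row CSV per video.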
    for save_paths, frames, video_key, cameraFile, location, scene, crowdDensity, weather, timeOfDay in tqdm(loader):
        if not save_paths:
            print(f"{save_paths} is broking")
            continue
        if os.path.exists(save_paths[0]):
            print(f"{save_paths} is already exists")
            continue
        if len(save_paths[0]) > 255:
            print("Name too long, skipping :", save_paths[0])
            continue

        folder, filename = os.path.split(save_paths[0])
        os.makedirs(folder, exist_ok=True)

        try:
            results = []
            for inputs in frames:
                with torch.inference_mode():
                    outputs = llm.generate([inputs], sampling_params=sampling_params)
                    generated_text = outputs[0].outputs[0].text
                    results.append(generated_text)

            df = pd.DataFrame(
                {
                    'videoFile': f"{video_key[0]}.mp4",
                    'cameraFile': cameraFile[0],
                    'caption': results[0].replace('\n', ' ').replace('\r', ' '),
                    'location': location[0],
                    'scene': scene[0],
                    'crowdDensity': crowdDensity[0],
                    'weather': weather[0],
                    'timeOfDay': timeOfDay[0],
                },
                index=[0],
            )
            output_path = save_paths[0]
            df.to_csv(output_path, index=False)

        except Exception as e:
            print(f"Error processing: {e}")

    print("Done")


if __name__ == "__main__":
    # os.environ["VLLM_WORKER_MULTIPROC_METHOD"] = "spawn"
    args = parse_args()

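    # Note: this hard-coded checkpoint path overrides whatever --model_id_or_path was
    # passed on the command line.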
    args.model_id_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Kwai-Keye/Keye-VL-1_5-8B"
    llm = LLM(
        args.model_id_or_path,
        # max_model_len=32768 if process_vision_info is None else 4096,
        # tensor_parallel_size=2,
        # distributed_executor_backend="mp",
        gpu_memory_utilization=0.95,
        trust_remote_code=True,
    )

    sampling_params = SamplingParams(
        temperature=0.3,
        max_tokens=512,
    )
    main(args, llm, sampling_params)
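
    # Example launch, a minimal sketch with placeholder paths (the script name is whatever
    # this file is saved as), processing shard 0 of 8:
    #   python caption_videos.py \
    #       --input_csv /path/to/videos.csv \
    #       --input_video_root /path/to/videos \
    #       --output_csv_path /path/to/output_csvs \
    #       --part 0 --total_part 8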