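"""Image-to-video generation with the low-pass pipeline variants.

Selects a Wan, CogVideoX, or HunyuanVideo image-to-video pipeline based on the
model path in the YAML config, generates a video from a single input image and
a text prompt, and exports the result as an MP4.

Example invocation (the script filename here is illustrative; the remaining
arguments fall back to the argparse defaults at the bottom of the file):

    python generate_i2v.py --config ./configs/hunyuan_video_alg.yaml
"""
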
import argparse
import logging
import sys

import torch
import yaml
from PIL import Image

# --- Diffusers and Transformers Imports ---
from diffusers import AutoencoderKLWan, UniPCMultistepScheduler, HunyuanVideoTransformer3DModel, FlowMatchEulerDiscreteScheduler
from diffusers.utils import export_to_video, load_image
from transformers import CLIPVisionModel

# --- Low-pass Pipelines ---
from pipeline_wan_image2video_lowpass import WanImageToVideoPipeline
from pipeline_cogvideox_image2video_lowpass import CogVideoXImageToVideoPipeline
from pipeline_hunyuan_video_image2video_lowpass import HunyuanVideoImageToVideoPipeline

from lp_utils import get_hunyuan_video_size

# --- Basic Logging Setup ---
logging.basicConfig(level=logging.INFO, format='%(asctime)s - %(levelname)s - %(message)s', stream=sys.stdout)
logger = logging.getLogger(__name__)


def main(args):
    # 1. Configuration
    IMAGE_PATH = args.image_path
    PROMPT = args.prompt
    OUTPUT_PATH = args.output_path
    MODEL_CACHE_DIR = args.model_cache_dir

    with open(args.config, 'r') as f:
        config = yaml.safe_load(f)
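
    # Expected config shape (illustrative sketch: the keys below are the ones
    # this script reads; all example values are assumptions):
    #
    # model:
    #   path: <repo id or local path containing 'Wan', 'CogVideoX', or 'HunyuanVideo'>
    #   dtype: bfloat16
    #   flow_shift: 7.0       # HunyuanVideo scheduler only
    #   flow_reverse: false   # HunyuanVideo scheduler only
    # generation:             # forwarded as pipeline kwargs (e.g. height, width)
    #   height: 480
    # alg:                    # extra low-pass pipeline kwargs, also forwarded
    # video:
    #   resolution: 480p      # used for HunyuanVideo sizing
    #   fps: 16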

    model_path = config['model']['path']
    model_dtype_str = config['model']['dtype']
    model_dtype = getattr(torch, model_dtype_str)

    device = "cuda" if torch.cuda.is_available() else "cpu"

    logger.info(f"Using device: {device}")

    # 2. Pipeline preparation
    if "Wan" in model_path:
        image_encoder = CLIPVisionModel.from_pretrained(model_path,
            subfolder="image_encoder",
            torch_dtype=torch.float32,
            cache_dir=MODEL_CACHE_DIR
        )
        vae = AutoencoderKLWan.from_pretrained(model_path,
            subfolder="vae",
            torch_dtype=torch.float32,
            cache_dir=MODEL_CACHE_DIR
        )
        pipe = WanImageToVideoPipeline.from_pretrained(model_path,
            vae=vae,
            image_encoder=image_encoder,
            torch_dtype=model_dtype,
            cache_dir=MODEL_CACHE_DIR
        )
        # Recommended scheduler setup (see https://github.com/huggingface/diffusers/blob/3c8b67b3711b668a6e7867e08b54280e51454eb5/src/diffusers/pipelines/wan/pipeline_wan.py#L58C13-L58C23):
        # a flow_shift of 3.0 for 480p generation, 5.0 otherwise.
        pipe.scheduler = UniPCMultistepScheduler.from_config(
            pipe.scheduler.config,
            flow_shift=3.0 if int(config['generation']['height']) == 480 else 5.0
        )
    elif "CogVideoX" in model_path:
        pipe = CogVideoXImageToVideoPipeline.from_pretrained(
            model_path,
            torch_dtype=model_dtype,
            cache_dir=MODEL_CACHE_DIR
        )
    elif "HunyuanVideo" in model_path:
        transformer = HunyuanVideoTransformer3DModel.from_pretrained(
            model_path,
            subfolder="transformer",
            torch_dtype=torch.bfloat16,
            cache_dir=MODEL_CACHE_DIR
        )
        pipe = HunyuanVideoImageToVideoPipeline.from_pretrained(
            model_path, transformer=transformer,
            torch_dtype=torch.float16,
            cache_dir=MODEL_CACHE_DIR
        )
        pipe.scheduler = FlowMatchEulerDiscreteScheduler.from_config(
            pipe.scheduler.config,
            flow_shift=config['model']['flow_shift'],
            invert_sigmas=config['model']['flow_reverse']
        )
    else:
        raise ValueError(
            f"Unsupported model path: {model_path!r}. "
            "Expected a path containing 'Wan', 'CogVideoX', or 'HunyuanVideo'."
        )
    pipe.to(device)
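    # On GPUs with limited VRAM, pipe.enable_model_cpu_offload() (a standard
    # diffusers pipeline method) is a possible alternative to pipe.to(device).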

    logger.info("Pipeline loaded successfully.")

    # 3. Prepare inputs
    input_image = load_image(IMAGE_PATH)

    generator = torch.Generator(device=device).manual_seed(42)  # fixed seed for reproducibility

    pipe_kwargs = {
        "image": input_image,
        "prompt": PROMPT,
        "generator": generator,
    }

    params_from_config = {**config.get('generation', {}), **config.get('alg', {})}

    for key, value in params_from_config.items():
        if value is not None:
            pipe_kwargs[key] = value

    logger.info("Starting video generation...")
    log_subset = {k: v for k, v in pipe_kwargs.items() if k not in ['image', 'generator']}
    logger.info(f"Pipeline arguments: {log_subset}")

    if "HunyuanVideo" in model_path:
        pipe_kwargs["height"], pipe_kwargs["width"] = get_hunyuan_video_size(config['video']['resolution'], input_image)

    # 4. Generate video
    video_output = pipe(**pipe_kwargs)
    video_frames = video_output.frames[0]  # .frames is a list of videos; each video is a list of PIL images
    logger.info(f"Video generation complete. Received {len(video_frames)} frames.")

    # 5. Save video
    export_to_video(video_frames, OUTPUT_PATH, fps=config['video']['fps'])
    logger.info("Video saved successfully. Run complete.")


if __name__ == '__main__':
    parser = argparse.ArgumentParser(description="Generate a video from a single image using the low-pass image-to-video pipelines")
    parser.add_argument("--config", type=str, default="./configs/hunyuan_video_alg.yaml")
    parser.add_argument("--image_path", type=str, default="./assets/a red double decker bus driving down a street.jpg")
    parser.add_argument("--prompt", type=str, default="a red double decker bus driving down a street")
    parser.add_argument("--output_path", type=str, default="output.mp4")
    parser.add_argument("--model_cache_dir", type=str, default=None)
    args = parser.parse_args()

    main(args)