import os

# Opt in to parallel shard loading. diffusers picks this flag up when it is
# imported, so it has to be set before the diffusers imports below.
os.environ["HF_ENABLE_PARALLEL_LOADING"] = "yes"

import torch
from diffusers import AutoencoderKLWan
from diffusers.video_processor import VideoProcessor
from diffusers.utils import export_to_video
device = "cuda"
# Local copy of the Wan-AI/Wan2.1-I2V-14B-720P-Diffusers checkpoint.
pretrained_model_name_or_path = "/mnt/bn/yufan-dev-my/ysh/Ckpts/Wan-AI/Wan2.1-I2V-14B-720P-Diffusers/"
# Decode-only use: load just the VAE, in float32 as in the diffusers Wan
# examples, since reduced precision hurts decode quality.
vae = AutoencoderKLWan.from_pretrained(
    pretrained_model_name_or_path,
    subfolder="vae",
    torch_dtype=torch.float32,
).to(device)
vae.eval()                 # inference only
vae.requires_grad_(False)  # no gradients needed for decoding
vae.enable_tiling()        # tiled decode keeps peak VRAM manageable for long/large clips
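# Optional sketch, assuming you are still memory-constrained with tiling on:
# AutoencoderKLWan also supports sliced decoding across the batch dimension.
# Uncomment if you hit CUDA OOM during decode.
# vae.enable_slicing()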
# Wan2.1's VAE downsamples 8x spatially; VideoProcessor needs this factor.
vae_scale_factor_spatial = vae.spatial_compression_ratio
video_processor = VideoProcessor(vae_scale_factor=vae_scale_factor_spatial)
# Pre-computed (normalized) VAE latents, saved offline as a dict.
latents = torch.load(
    "/mnt/bn/yufan-dev-my/ysh/Datasets/fp_offload_latents_wan/6ad434bc-df9b-40be-9632-c8f9508f1ccc_121_768_384.pt",
    map_location="cpu",
    weights_only=False,
)
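# The payload is a dict with a 'vae_latent' entry (used below); the shape is
# assumed to follow Wan's [batch, z_dim, latent_frames, latent_height,
# latent_width] layout. A quick look at what was actually saved:
print({k: getattr(v, "shape", type(v)) for k, v in latents.items()})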
# Undo the per-channel latent normalization before decoding. The naming
# follows WanPipeline: latents_std actually holds the reciprocal 1/std, so the
# division below multiplies by the true std.
latents_mean = torch.tensor(vae.config.latents_mean).view(1, vae.config.z_dim, 1, 1, 1)
latents_std = 1.0 / torch.tensor(vae.config.latents_std).view(1, vae.config.z_dim, 1, 1, 1)
vae_latents = latents["vae_latent"] / latents_std + latents_mean
vae_latents = vae_latents.to(device=device, dtype=vae.dtype)
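# Cheap sanity check before the (expensive) decode: AutoencoderKLWan.decode
# expects 5D latents [batch, z_dim, frames, height, width].
assert vae_latents.ndim == 5 and vae_latents.shape[1] == vae.config.z_dim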
# Decode to pixel space, convert to PIL frames, and write the video.
video = vae.decode(vae_latents, return_dict=False)[0]
video = video_processor.postprocess_video(video, output_type="pil")  # one list of frames per batch item
export_to_video(video[0], "output_wan.mp4", fps=30)
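# Optional cleanup, useful if this snippet runs inside a larger job: drop the
# GPU-resident latents and return the VRAM to the pool.
del vae_latents
torch.cuda.empty_cache()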