SuperCS committed · Commit fff3baa · verified · 1 Parent(s): b4feb07

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. exp_code/1_benchmark/CausVid/causvid/__pycache__/bidirectional_trajectory_pipeline.cpython-312.pyc +0 -0
  2. exp_code/1_benchmark/CausVid/causvid/__pycache__/data.cpython-312.pyc +0 -0
  3. exp_code/1_benchmark/CausVid/causvid/__pycache__/dmd.cpython-312.pyc +0 -0
  4. exp_code/1_benchmark/CausVid/causvid/__pycache__/loss.cpython-312.pyc +0 -0
  5. exp_code/1_benchmark/CausVid/causvid/__pycache__/ode_regression.cpython-312.pyc +0 -0
  6. exp_code/1_benchmark/CausVid/causvid/__pycache__/scheduler.cpython-312.pyc +0 -0
  7. exp_code/1_benchmark/CausVid/causvid/__pycache__/util.cpython-312.pyc +0 -0
  8. exp_code/1_benchmark/CausVid/causvid/bidirectional_trajectory_pipeline.py +47 -0
  9. exp_code/1_benchmark/CausVid/causvid/data.py +74 -0
  10. exp_code/1_benchmark/CausVid/causvid/dmd.py +497 -0
  11. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/captions_coco14_test.txt +0 -0
  12. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/__init__.py +0 -0
  13. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/clip_features.py +38 -0
  14. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/downloads_helper.py +73 -0
  15. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/features.py +85 -0
  16. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/fid.py +635 -0
  17. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/inception_pytorch.py +332 -0
  18. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/inception_torchscript.py +57 -0
  19. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/leaderboard.py +43 -0
  20. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/resize.py +133 -0
  21. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/utils.py +98 -0
  22. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/wrappers.py +108 -0
  23. exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/coco_evaluator.py +246 -0
  24. exp_code/1_benchmark/CausVid/causvid/evaluation/eval_sdxl_coco.py +135 -0
  25. exp_code/1_benchmark/CausVid/causvid/evaluation/inference_sdxl.py +146 -0
  26. exp_code/1_benchmark/CausVid/causvid/evaluation/parallel_sdxl_eval.sh +54 -0
  27. exp_code/1_benchmark/CausVid/causvid/loss.py +82 -0
  28. exp_code/1_benchmark/CausVid/causvid/models/__init__.py +56 -0
  29. exp_code/1_benchmark/CausVid/causvid/models/__pycache__/__init__.cpython-312.pyc +0 -0
  30. exp_code/1_benchmark/CausVid/causvid/models/__pycache__/model_interface.cpython-312.pyc +0 -0
  31. exp_code/1_benchmark/CausVid/causvid/models/model_interface.py +114 -0
  32. exp_code/1_benchmark/CausVid/causvid/models/sdxl/__pycache__/sdxl_wrapper.cpython-312.pyc +0 -0
  33. exp_code/1_benchmark/CausVid/causvid/models/sdxl/sdxl_wrapper.py +200 -0
  34. exp_code/1_benchmark/CausVid/causvid/models/wan/__init__.py +0 -0
  35. exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/__init__.cpython-312.pyc +0 -0
  36. exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/causal_inference.cpython-312.pyc +0 -0
  37. exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/causal_model.cpython-312.pyc +0 -0
  38. exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/flow_match.cpython-312.pyc +0 -0
  39. exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/wan_wrapper.cpython-312.pyc +0 -0
  40. exp_code/1_benchmark/CausVid/causvid/models/wan/bidirectional_inference.py +69 -0
  41. exp_code/1_benchmark/CausVid/causvid/models/wan/causal_inference.py +204 -0
  42. exp_code/1_benchmark/CausVid/causvid/models/wan/causal_model.py +749 -0
  43. exp_code/1_benchmark/CausVid/causvid/models/wan/flow_match.py +83 -0
  44. exp_code/1_benchmark/CausVid/causvid/models/wan/generate_ode_pairs.py +125 -0
  45. exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/README.md +2 -0
  46. exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__init__.py +3 -0
  47. exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__pycache__/__init__.cpython-312.pyc +0 -0
  48. exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__pycache__/image2video.cpython-312.pyc +0 -0
  49. exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__pycache__/text2video.cpython-312.pyc +0 -0
  50. exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/configs/__init__.py +42 -0
exp_code/1_benchmark/CausVid/causvid/__pycache__/bidirectional_trajectory_pipeline.cpython-312.pyc ADDED
Binary file (2.8 kB).

exp_code/1_benchmark/CausVid/causvid/__pycache__/data.cpython-312.pyc ADDED
Binary file (4.22 kB).

exp_code/1_benchmark/CausVid/causvid/__pycache__/dmd.cpython-312.pyc ADDED
Binary file (24.9 kB).

exp_code/1_benchmark/CausVid/causvid/__pycache__/loss.cpython-312.pyc ADDED
Binary file (4.58 kB).

exp_code/1_benchmark/CausVid/causvid/__pycache__/ode_regression.cpython-312.pyc ADDED
Binary file (8.92 kB).

exp_code/1_benchmark/CausVid/causvid/__pycache__/scheduler.cpython-312.pyc ADDED
Binary file (5.48 kB).

exp_code/1_benchmark/CausVid/causvid/__pycache__/util.cpython-312.pyc ADDED
Binary file (7.96 kB).
exp_code/1_benchmark/CausVid/causvid/bidirectional_trajectory_pipeline.py ADDED
@@ -0,0 +1,47 @@
+ from causvid.models.model_interface import (
+     InferencePipelineInterface,
+     DiffusionModelInterface,
+     TextEncoderInterface
+ )
+ from causvid.scheduler import SchedulerInterface
+ from typing import List
+ import torch
+
+
+ class BidirectionalInferenceWrapper(InferencePipelineInterface):
+     def __init__(self, denoising_step_list: List[int],
+                  scheduler: SchedulerInterface,
+                  generator: DiffusionModelInterface, **kwargs):
+         super().__init__()
+         self.scheduler = scheduler
+         self.generator = generator
+         self.denoising_step_list = denoising_step_list
+
+     def inference_with_trajectory(self, noise: torch.Tensor, conditional_dict: dict) -> torch.Tensor:
+         output_list = [noise]
+
+         # initial point
+         noisy_image_or_video = noise
+
+         # use the last n-1 timesteps to simulate the generator's input
+         for index, current_timestep in enumerate(self.denoising_step_list[:-1]):
+             pred_image_or_video = self.generator(
+                 noisy_image_or_video=noisy_image_or_video,
+                 conditional_dict=conditional_dict,
+                 timestep=torch.ones(
+                     noise.shape[:2], dtype=torch.long, device=noise.device) * current_timestep
+             )  # [B, F, C, H, W]
+
+             # TODO: Change backward simulation for causal video
+             next_timestep = self.denoising_step_list[index + 1] * torch.ones(
+                 noise.shape[:2], dtype=torch.long, device=noise.device)
+             noisy_image_or_video = self.scheduler.add_noise(
+                 pred_image_or_video.flatten(0, 1),
+                 torch.randn_like(pred_image_or_video.flatten(0, 1)),
+                 next_timestep.flatten(0, 1)
+             ).unflatten(0, noise.shape[:2])
+             output_list.append(noisy_image_or_video)
+
+         # [B, T, F, C, H, W]
+         output = torch.stack(output_list, dim=1)
+         return output
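A minimal usage sketch of the wrapper above, assuming concrete `scheduler` and `generator` objects that implement the corresponding interfaces; the step list and latent shape are illustrative, not taken from the repo's configs:

    # hypothetical setup: scheduler/generator come from the causvid model wrappers
    pipeline = BidirectionalInferenceWrapper(
        denoising_step_list=[999, 748, 502, 247],  # assumed 4-step schedule
        scheduler=scheduler,
        generator=generator,
    )
    noise = torch.randn(1, 21, 16, 60, 104)        # [B, F, C, H, W], illustrative shape
    trajectory = pipeline.inference_with_trajectory(noise, conditional_dict)
    # trajectory: [B, T, F, C, H, W]; index 0 along T is pure noise, later indices are
    # the re-noised inputs the generator would see at each denoising step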
exp_code/1_benchmark/CausVid/causvid/data.py ADDED
@@ -0,0 +1,74 @@
+ from causvid.ode_data.create_lmdb_iterative import get_array_shape_from_lmdb, retrieve_row_from_lmdb
+ from torch.utils.data import Dataset
+ import numpy as np
+ import torch
+ import lmdb
+
+
+ class TextDataset(Dataset):
+     def __init__(self, data_path):
+         self.texts = []
+         with open(data_path, "r") as f:
+             for line in f:
+                 self.texts.append(line.strip())
+
+     def __len__(self):
+         return len(self.texts)
+
+     def __getitem__(self, idx):
+         return self.texts[idx]
+
+
+ class ODERegressionDataset(Dataset):
+     def __init__(self, data_path, max_pair=int(1e8)):
+         self.data_dict = torch.load(data_path, weights_only=False)
+         self.max_pair = max_pair
+
+     def __len__(self):
+         return min(len(self.data_dict['prompts']), self.max_pair)
+
+     def __getitem__(self, idx):
+         """
+         Outputs:
+             - prompts: List of Strings
+             - latents: Tensor of shape (num_denoising_steps, num_frames, num_channels, height, width). It is ordered from pure noise to clean image.
+         """
+         return {
+             "prompts": self.data_dict['prompts'][idx],
+             "ode_latent": self.data_dict['latents'][idx].squeeze(0),
+         }
+
+
+ class ODERegressionLMDBDataset(Dataset):
+     def __init__(self, data_path: str, max_pair: int = int(1e8)):
+         self.env = lmdb.open(data_path, readonly=True,
+                              lock=False, readahead=False, meminit=False)
+
+         self.latents_shape = get_array_shape_from_lmdb(self.env, 'latents')
+         self.max_pair = max_pair
+
+     def __len__(self):
+         return min(self.latents_shape[0], self.max_pair)
+
+     def __getitem__(self, idx):
+         """
+         Outputs:
+             - prompts: List of Strings
+             - latents: Tensor of shape (num_denoising_steps, num_frames, num_channels, height, width). It is ordered from pure noise to clean image.
+         """
+         latents = retrieve_row_from_lmdb(
+             self.env,
+             "latents", np.float16, idx, shape=self.latents_shape[1:]
+         )
+
+         if len(latents.shape) == 4:
+             latents = latents[None, ...]
+
+         prompts = retrieve_row_from_lmdb(
+             self.env,
+             "prompts", str, idx
+         )
+         return {
+             "prompts": prompts,
+             "ode_latent": torch.tensor(latents, dtype=torch.float32)
+         }
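A short sketch of how these datasets would typically be consumed, assuming an LMDB directory produced by the repo's create_lmdb_iterative tooling (the path and batch size are illustrative):

    from torch.utils.data import DataLoader

    # hypothetical path to a prepared ODE-pair LMDB
    dataset = ODERegressionLMDBDataset("/data/ode_pairs_lmdb", max_pair=int(1e6))
    loader = DataLoader(dataset, batch_size=2, shuffle=True, num_workers=4)

    batch = next(iter(loader))
    # batch["prompts"]: list of strings, batch["ode_latent"]:
    # [B, num_denoising_steps, num_frames, C, H, W], ordered from pure noise to clean latent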
exp_code/1_benchmark/CausVid/causvid/dmd.py ADDED
@@ -0,0 +1,497 @@
+ from causvid.models.model_interface import InferencePipelineInterface
+ from causvid.models import (
+     get_diffusion_wrapper,
+     get_text_encoder_wrapper,
+     get_vae_wrapper,
+     get_inference_pipeline_wrapper
+ )
+ from causvid.loss import get_denoising_loss
+ import torch.nn.functional as F
+ from typing import Tuple
+ from torch import nn
+ import torch
+
+
+ class DMD(nn.Module):
+     def __init__(self, args, device):
+         """
+         Initialize the DMD (Distribution Matching Distillation) module.
+         This class is self-contained and computes the generator and fake score losses
+         in the forward pass.
+         """
+         super().__init__()
+
+         # Step 1: Initialize all models
+
+         self.generator_model_name = getattr(
+             args, "generator_name", args.model_name)
+         self.real_model_name = getattr(args, "real_name", args.model_name)
+         self.fake_model_name = getattr(args, "fake_name", args.model_name)
+
+         self.generator_task_type = getattr(
+             args, "generator_task_type", args.generator_task)
+         self.real_task_type = getattr(
+             args, "real_task_type", args.generator_task)
+         self.fake_task_type = getattr(
+             args, "fake_task_type", args.generator_task)
+
+         self.generator = get_diffusion_wrapper(
+             model_name=self.generator_model_name)()
+         self.generator.set_module_grad(
+             module_grad=args.generator_grad
+         )
+
+         if getattr(args, "generator_ckpt", False):
+             print(f"Loading pretrained generator from {args.generator_ckpt}")
+             state_dict = torch.load(args.generator_ckpt, map_location="cpu")[
+                 'generator']
+             self.generator.load_state_dict(
+                 state_dict, strict=True
+             )
+
+         self.num_frame_per_block = getattr(args, "num_frame_per_block", 1)
+
+         if self.num_frame_per_block > 1:
+             self.generator.model.num_frame_per_block = self.num_frame_per_block
+
+         self.real_score = get_diffusion_wrapper(
+             model_name=self.real_model_name)()
+         self.real_score.set_module_grad(
+             module_grad=args.real_score_grad
+         )
+
+         self.fake_score = get_diffusion_wrapper(
+             model_name=self.fake_model_name)()
+         self.fake_score.set_module_grad(
+             module_grad=args.fake_score_grad
+         )
+
+         if args.gradient_checkpointing:
+             self.generator.enable_gradient_checkpointing()
+             self.fake_score.enable_gradient_checkpointing()
+
+         self.text_encoder = get_text_encoder_wrapper(
+             model_name=args.model_name)()
+         self.text_encoder.requires_grad_(False)
+
+         self.vae = get_vae_wrapper(model_name=args.model_name)()
+         self.vae.requires_grad_(False)
+
+         # this will be initialized later with fsdp-wrapped modules
+         self.inference_pipeline: InferencePipelineInterface = None
+
+         # Step 2: Initialize all dmd hyperparameters
+
+         self.denoising_step_list = torch.tensor(
+             args.denoising_step_list, dtype=torch.long, device=device)
+         self.num_train_timestep = args.num_train_timestep
+         self.min_step = int(0.02 * self.num_train_timestep)
+         self.max_step = int(0.98 * self.num_train_timestep)
+         self.real_guidance_scale = args.real_guidance_scale
+         self.timestep_shift = getattr(args, "timestep_shift", 1.0)
+
+         self.args = args
+         self.device = device
+         self.dtype = torch.bfloat16 if args.mixed_precision else torch.float32
+         self.scheduler = self.generator.get_scheduler()
+         self.denoising_loss_func = get_denoising_loss(
+             args.denoising_loss_type)()
+
+         if args.warp_denoising_step:  # Warp the denoising step according to the scheduler time
+             timesteps = torch.cat((self.scheduler.timesteps.cpu(), torch.tensor([0], dtype=torch.float32))).cuda()
+             self.denoising_step_list = timesteps[1000 - self.denoising_step_list]
+
+         if getattr(self.scheduler, "alphas_cumprod", None) is not None:
+             self.scheduler.alphas_cumprod = self.scheduler.alphas_cumprod.to(
+                 device)
+         else:
+             self.scheduler.alphas_cumprod = None
+
+     def _process_timestep(self, timestep: torch.Tensor, type: str) -> torch.Tensor:
+         """
+         Pre-process the randomly generated timestep based on the generator's task type.
+         Input:
+             - timestep: [batch_size, num_frame] tensor containing the randomly generated timestep.
+             - type: a string indicating the type of the current model (image, bidirectional_video, or causal_video).
+         Output Behavior:
+             - image: check that the second dimension (num_frame) is 1.
+             - bidirectional_video: broadcast the timestep to be the same for all frames.
+             - causal_video: broadcast the timestep to be the same for all frames **in a block**.
+         """
+         if type == "image":
+             assert timestep.shape[1] == 1
+             return timestep
+         elif type == "bidirectional_video":
+             for index in range(timestep.shape[0]):
+                 timestep[index] = timestep[index, 0]
+             return timestep
+         elif type == "causal_video":
+             # make the noise level the same within every motion block
+             timestep = timestep.reshape(timestep.shape[0], -1, self.num_frame_per_block)
+             timestep[:, :, 1:] = timestep[:, :, 0:1]
+             timestep = timestep.reshape(timestep.shape[0], -1)
+             return timestep
+         else:
+             raise NotImplementedError("Unsupported model type {}".format(type))
+
+     def _compute_kl_grad(
+         self, noisy_image_or_video: torch.Tensor,
+         estimated_clean_image_or_video: torch.Tensor,
+         timestep: torch.Tensor,
+         conditional_dict: dict, unconditional_dict: dict,
+         normalization: bool = True
+     ) -> Tuple[torch.Tensor, dict]:
+         """
+         Compute the KL grad (eq 7 in https://arxiv.org/abs/2311.18828).
+         Input:
+             - noisy_image_or_video: a tensor with shape [B, F, C, H, W] where the number of frames is 1 for images.
+             - estimated_clean_image_or_video: a tensor with shape [B, F, C, H, W] representing the estimated clean image or video.
+             - timestep: a tensor with shape [B, F] containing the randomly generated timestep.
+             - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
+             - unconditional_dict: a dictionary containing the unconditional information (e.g. null/negative text embeddings, null/negative image embeddings).
+             - normalization: a boolean indicating whether to normalize the gradient.
+         Output:
+             - kl_grad: a tensor representing the KL grad.
+             - kl_log_dict: a dictionary containing the intermediate tensors for logging.
+         """
+         # Step 1: Compute the fake score
+         pred_fake_image = self.fake_score(
+             noisy_image_or_video=noisy_image_or_video,
+             conditional_dict=conditional_dict,
+             timestep=timestep
+         )
+
+         # Step 2: Compute the real score
+         # We compute the conditional and unconditional predictions
+         # and add them together to achieve cfg (https://arxiv.org/abs/2207.12598)
+         pred_real_image_cond = self.real_score(
+             noisy_image_or_video=noisy_image_or_video,
+             conditional_dict=conditional_dict,
+             timestep=timestep
+         )
+
+         pred_real_image_uncond = self.real_score(
+             noisy_image_or_video=noisy_image_or_video,
+             conditional_dict=unconditional_dict,
+             timestep=timestep
+         )
+
+         pred_real_image = pred_real_image_cond + (
+             pred_real_image_cond - pred_real_image_uncond
+         ) * self.real_guidance_scale
+
+         # Step 3: Compute the DMD gradient (DMD paper eq. 7).
+         grad = (pred_fake_image - pred_real_image)
+
+         # TODO: Change the normalizer for causal teacher
+         if normalization:
+             # Step 4: Gradient normalization (DMD paper eq. 8).
+             p_real = (estimated_clean_image_or_video - pred_real_image)
+             normalizer = torch.abs(p_real).mean(dim=[1, 2, 3, 4], keepdim=True)
+             grad = grad / normalizer
+         grad = torch.nan_to_num(grad)
+
+         return grad, {
+             "dmdtrain_clean_latent": estimated_clean_image_or_video.detach(),
+             "dmdtrain_noisy_latent": noisy_image_or_video.detach(),
+             "dmdtrain_pred_real_image": pred_real_image.detach(),
+             "dmdtrain_pred_fake_image": pred_fake_image.detach(),
+             "dmdtrain_gradient_norm": torch.mean(torch.abs(grad)).detach(),
+             "timestep": timestep.detach()
+         }
+
+     def compute_distribution_matching_loss(
+         self, image_or_video: torch.Tensor, conditional_dict: dict,
+         unconditional_dict: dict, gradient_mask: torch.Tensor = None
+     ) -> Tuple[torch.Tensor, dict]:
+         """
+         Compute the DMD loss (eq 7 in https://arxiv.org/abs/2311.18828).
+         Input:
+             - image_or_video: a tensor with shape [B, F, C, H, W] where the number of frames is 1 for images.
+             - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
+             - unconditional_dict: a dictionary containing the unconditional information (e.g. null/negative text embeddings, null/negative image embeddings).
+             - gradient_mask: a boolean tensor with the same shape as image_or_video indicating which pixels to compute the loss on.
+         Output:
+             - dmd_loss: a scalar tensor representing the DMD loss.
+             - dmd_log_dict: a dictionary containing the intermediate tensors for logging.
+         """
+         original_latent = image_or_video
+
+         batch_size, num_frame = image_or_video.shape[:2]
+
+         with torch.no_grad():
+             # Step 1: Randomly sample timestep based on the given schedule and corresponding noise
+             timestep = torch.randint(
+                 0,
+                 self.num_train_timestep,
+                 [batch_size, num_frame],
+                 device=self.device,
+                 dtype=torch.long
+             )
+
+             timestep = self._process_timestep(
+                 timestep, type=self.real_task_type)
+
+             # TODO: Add timestep warping
+             if self.timestep_shift > 1:
+                 timestep = self.timestep_shift * \
+                     (timestep / 1000) / \
+                     (1 + (self.timestep_shift - 1) * (timestep / 1000)) * 1000
+             timestep = timestep.clamp(self.min_step, self.max_step)
+
+             noise = torch.randn_like(image_or_video)
+             noisy_latent = self.scheduler.add_noise(
+                 image_or_video.flatten(0, 1),
+                 noise.flatten(0, 1),
+                 timestep.flatten(0, 1)
+             ).detach().unflatten(0, (batch_size, num_frame))
+
+             # Step 2: Compute the KL grad
+             grad, dmd_log_dict = self._compute_kl_grad(
+                 noisy_image_or_video=noisy_latent,
+                 estimated_clean_image_or_video=original_latent,
+                 timestep=timestep,
+                 conditional_dict=conditional_dict,
+                 unconditional_dict=unconditional_dict
+             )
+
+         if gradient_mask is not None:
+             dmd_loss = 0.5 * F.mse_loss(original_latent.double()[gradient_mask], (original_latent.double() - grad.double()).detach()[gradient_mask], reduction="mean")
+         else:
+             dmd_loss = 0.5 * F.mse_loss(original_latent.double(), (original_latent.double() - grad.double()).detach(), reduction="mean")
+         return dmd_loss, dmd_log_dict
+
+     def _initialize_inference_pipeline(self):
+         """
+         Lazily initialize the inference pipeline during the first backward simulation run.
+         Here we encapsulate the inference code with a model-dependent outside function.
+         We pass our FSDP-wrapped modules into the pipeline to save memory.
+         """
+         self.inference_pipeline = get_inference_pipeline_wrapper(
+             self.generator_model_name,
+             denoising_step_list=self.denoising_step_list,
+             scheduler=self.scheduler,
+             generator=self.generator,
+             num_frame_per_block=self.num_frame_per_block
+         )
+
+     @torch.no_grad()
+     def _consistency_backward_simulation(self, noise: torch.Tensor, conditional_dict: dict) -> torch.Tensor:
+         """
+         Simulate the generator's input from noise to avoid a training/inference mismatch.
+         See Sec 4.5 of the DMD2 paper (https://arxiv.org/abs/2405.14867) for details.
+         Here we use the consistency sampler (https://arxiv.org/abs/2303.01469).
+         Input:
+             - noise: a tensor sampled from N(0, 1) with shape [B, F, C, H, W] where the number of frames is 1 for images.
+             - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
+         Output:
+             - output: a tensor with shape [B, T, F, C, H, W].
+               T is the total number of timesteps. output[0] is pure noise and output[i] for i > 0
+               represents the x0 prediction at each timestep.
+         """
+         if self.inference_pipeline is None:
+             self._initialize_inference_pipeline()
+
+         return self.inference_pipeline.inference_with_trajectory(noise=noise, conditional_dict=conditional_dict)
+
+     def _run_generator(self, image_or_video_shape, conditional_dict: dict, unconditional_dict: dict, clean_latent: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
+         """
+         Optionally simulate the generator's input from noise using backward simulation
+         and then run the generator for one step.
+         Input:
+             - image_or_video_shape: a list containing the shape of the image or video [B, F, C, H, W].
+             - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
+             - unconditional_dict: a dictionary containing the unconditional information (e.g. null/negative text embeddings, null/negative image embeddings).
+             - clean_latent: a tensor containing the clean latents [B, F, C, H, W]. Needs to be passed when no backward simulation is used.
+         Output:
+             - pred_image: a tensor with shape [B, F, C, H, W].
+         """
+         # Step 1: Sample noise and backward simulate the generator's input
+         if getattr(self.args, "backward_simulation", True):
+             simulated_noisy_input = self._consistency_backward_simulation(
+                 noise=torch.randn(image_or_video_shape,
+                                   device=self.device, dtype=self.dtype),
+                 conditional_dict=conditional_dict
+             )
+         else:
+             simulated_noisy_input = []
+             for timestep in self.denoising_step_list:
+                 noise = torch.randn(
+                     image_or_video_shape, device=self.device, dtype=self.dtype)
+
+                 noisy_timestep = timestep * torch.ones(
+                     image_or_video_shape[:2], device=self.device, dtype=torch.long)
+
+                 if timestep != 0:
+                     noisy_image = self.scheduler.add_noise(
+                         clean_latent.flatten(0, 1),
+                         noise.flatten(0, 1),
+                         noisy_timestep.flatten(0, 1)
+                     ).unflatten(0, image_or_video_shape[:2])
+                 else:
+                     noisy_image = clean_latent
+
+                 simulated_noisy_input.append(noisy_image)
+
+             simulated_noisy_input = torch.stack(simulated_noisy_input, dim=1)
+
+         # Step 2: Randomly sample a timestep and pick the corresponding input
+         index = torch.randint(0, len(self.denoising_step_list), [image_or_video_shape[0], image_or_video_shape[1]], device=self.device, dtype=torch.long)
+         index = self._process_timestep(index, type=self.generator_task_type)
+
+         # select the corresponding timestep's noisy input from the stacked tensor [B, T, F, C, H, W]
+         noisy_input = torch.gather(
+             simulated_noisy_input, dim=1,
+             index=index.reshape(index.shape[0], 1, index.shape[1], 1, 1, 1).expand(
+                 -1, -1, -1, *image_or_video_shape[2:])
+         ).squeeze(1)
+
+         timestep = self.denoising_step_list[index]
+
+         pred_image_or_video = self.generator(
+             noisy_image_or_video=noisy_input,
+             conditional_dict=conditional_dict,
+             timestep=timestep
+         )
+
+         gradient_mask = None  # timestep != 0
+
+         # pred_image_or_video = noisy_input * \
+         #     (1 - gradient_mask.float()).reshape(*gradient_mask.shape, 1, 1, 1) + \
+         #     pred_image_or_video * gradient_mask.float().reshape(*gradient_mask.shape, 1, 1, 1)
+
+         pred_image_or_video = pred_image_or_video.type_as(noisy_input)
+
+         return pred_image_or_video, gradient_mask
+
+     def generator_loss(self, image_or_video_shape, conditional_dict: dict, unconditional_dict: dict, clean_latent: torch.Tensor) -> Tuple[torch.Tensor, dict]:
+         """
+         Generate images/videos from noise and compute the DMD loss.
+         The noisy input to the generator is backward simulated.
+         This removes the need for any datasets during distillation.
+         See Sec 4.5 of the DMD2 paper (https://arxiv.org/abs/2405.14867) for details.
+         Input:
+             - image_or_video_shape: a list containing the shape of the image or video [B, F, C, H, W].
+             - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
+             - unconditional_dict: a dictionary containing the unconditional information (e.g. null/negative text embeddings, null/negative image embeddings).
+             - clean_latent: a tensor containing the clean latents [B, F, C, H, W]. Needs to be passed when no backward simulation is used.
+         Output:
+             - loss: a scalar tensor representing the generator loss.
+             - generator_log_dict: a dictionary containing the intermediate tensors for logging.
+         """
+         # Step 1: Run generator on backward simulated noisy input
+         pred_image, gradient_mask = self._run_generator(
+             image_or_video_shape=image_or_video_shape,
+             conditional_dict=conditional_dict,
+             unconditional_dict=unconditional_dict,
+             clean_latent=clean_latent
+         )
+
+         # Step 2: Compute the DMD loss
+         dmd_loss, dmd_log_dict = self.compute_distribution_matching_loss(
+             image_or_video=pred_image,
+             conditional_dict=conditional_dict,
+             unconditional_dict=unconditional_dict,
+             gradient_mask=gradient_mask
+         )
+
+         # Step 3: TODO: Implement the GAN loss
+
+         return dmd_loss, dmd_log_dict
+
+     def critic_loss(self, image_or_video_shape, conditional_dict: dict, unconditional_dict: dict, clean_latent: torch.Tensor) -> Tuple[torch.Tensor, dict]:
+         """
+         Generate images/videos from noise and train the critic with generated samples.
+         The noisy input to the generator is backward simulated.
+         This removes the need for any datasets during distillation.
+         See Sec 4.5 of the DMD2 paper (https://arxiv.org/abs/2405.14867) for details.
+         Input:
+             - image_or_video_shape: a list containing the shape of the image or video [B, F, C, H, W].
+             - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
+             - unconditional_dict: a dictionary containing the unconditional information (e.g. null/negative text embeddings, null/negative image embeddings).
+             - clean_latent: a tensor containing the clean latents [B, F, C, H, W]. Needs to be passed when no backward simulation is used.
+         Output:
+             - loss: a scalar tensor representing the critic loss.
+             - critic_log_dict: a dictionary containing the intermediate tensors for logging.
+         """
+
+         # Step 1: Run generator on backward simulated noisy input
+         with torch.no_grad():
+             generated_image, _ = self._run_generator(
+                 image_or_video_shape=image_or_video_shape,
+                 conditional_dict=conditional_dict,
+                 unconditional_dict=unconditional_dict,
+                 clean_latent=clean_latent
+             )
+
+         # Step 2: Compute the fake prediction
+         critic_timestep = torch.randint(
+             0,
+             self.num_train_timestep,
+             image_or_video_shape[:2],
+             device=self.device,
+             dtype=torch.long
+         )
+         critic_timestep = self._process_timestep(
+             critic_timestep, type=self.fake_task_type)
+
+         # TODO: Add timestep warping
+         if self.timestep_shift > 1:
+             critic_timestep = self.timestep_shift * \
+                 (critic_timestep / 1000) / (1 + (self.timestep_shift - 1) * (critic_timestep / 1000)) * 1000
+
+         critic_timestep = critic_timestep.clamp(self.min_step, self.max_step)
+
+         critic_noise = torch.randn_like(generated_image)
+         noisy_generated_image = self.scheduler.add_noise(
+             generated_image.flatten(0, 1),
+             critic_noise.flatten(0, 1),
+             critic_timestep.flatten(0, 1)
+         ).unflatten(0, image_or_video_shape[:2])
+
+         pred_fake_image = self.fake_score(
+             noisy_image_or_video=noisy_generated_image,
+             conditional_dict=conditional_dict,
+             timestep=critic_timestep
+         )
+
+         # Step 3: Compute the denoising loss for the fake critic
+         if self.args.denoising_loss_type == "flow":
+             assert "wan" in self.args.model_name
+             from causvid.models.wan.wan_wrapper import WanDiffusionWrapper
+             flow_pred = WanDiffusionWrapper._convert_x0_to_flow_pred(
+                 scheduler=self.scheduler,
+                 x0_pred=pred_fake_image.flatten(0, 1),
+                 xt=noisy_generated_image.flatten(0, 1),
+                 timestep=critic_timestep.flatten(0, 1)
+             )
+             pred_fake_noise = None
+         else:
+             flow_pred = None
+             pred_fake_noise = self.scheduler.convert_x0_to_noise(
+                 x0=pred_fake_image.flatten(0, 1),
+                 xt=noisy_generated_image.flatten(0, 1),
+                 timestep=critic_timestep.flatten(0, 1)
+             ).unflatten(0, image_or_video_shape[:2])
+
+         denoising_loss = self.denoising_loss_func(
+             x=generated_image.flatten(0, 1),
+             x_pred=pred_fake_image.flatten(0, 1),
+             noise=critic_noise.flatten(0, 1),
+             noise_pred=pred_fake_noise,
+             alphas_cumprod=self.scheduler.alphas_cumprod,
+             timestep=critic_timestep.flatten(0, 1),
+             flow_pred=flow_pred
+         )
+
+         # Step 4: TODO: Compute the GAN loss
+
+         # Step 5: Debugging Log
+         critic_log_dict = {
+             "critictrain_latent": generated_image.detach(),
+             "critictrain_noisy_latent": noisy_generated_image.detach(),
+             "critictrain_pred_image": pred_fake_image.detach(),
+             "critic_timestep": critic_timestep.detach()
+         }
+
+         return denoising_loss, critic_log_dict
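A minimal training-step sketch for the DMD module above, assuming `args`, precomputed `conditional_dict`/`unconditional_dict` from the text encoder, and two optimizers already exist; all names, the latent shape, and the update order are illustrative rather than the repo's actual trainer:

    # hypothetical alternating update; optimizer_g / optimizer_c are assumed to exist
    dmd = DMD(args, device="cuda")
    shape = [1, 21, 16, 60, 104]  # [B, F, C, H, W], illustrative

    # critic (fake score) update; clean_latent=None is fine when backward simulation is on
    c_loss, c_log = dmd.critic_loss(shape, conditional_dict, unconditional_dict, clean_latent=None)
    c_loss.backward()
    optimizer_c.step()
    optimizer_c.zero_grad()

    # generator update via the distribution matching loss
    g_loss, g_log = dmd.generator_loss(shape, conditional_dict, unconditional_dict, clean_latent=None)
    g_loss.backward()
    optimizer_g.step()
    optimizer_g.zero_grad()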
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/captions_coco14_test.txt ADDED
The diff for this file is too large to render.
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/__init__.py ADDED
File without changes
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/clip_features.py ADDED
@@ -0,0 +1,38 @@
+ # pip install git+https://github.com/openai/CLIP.git
+ import pdb
+ from PIL import Image
+ import numpy as np
+ import torch
+ import torchvision.transforms as transforms
+ import clip
+ from causvid.evaluation.coco_eval.cleanfid.fid import compute_fid
+
+
+ def img_preprocess_clip(img_np):
+     x = Image.fromarray(img_np.astype(np.uint8)).convert("RGB")
+     T = transforms.Compose([
+         transforms.Resize(
+             224, interpolation=transforms.InterpolationMode.BICUBIC),
+         transforms.CenterCrop(224),
+     ])
+     return np.asarray(T(x)).clip(0, 255).astype(np.uint8)
+
+
+ class CLIP_fx():
+     def __init__(self, name="ViT-B/32", device="cuda"):
+         self.model, _ = clip.load(name, device=device)
+         self.model.eval()
+         self.name = "clip_" + name.lower().replace("-", "_").replace("/", "_")
+
+     def __call__(self, img_t):
+         img_x = img_t / 255.0
+         T_norm = transforms.Normalize(
+             (0.48145466, 0.4578275, 0.40821073), (0.26862954, 0.26130258, 0.27577711))
+         img_x = T_norm(img_x)
+         assert torch.is_tensor(img_x)
+         if len(img_x.shape) == 3:
+             img_x = img_x.unsqueeze(0)
+         B, C, H, W = img_x.shape
+         with torch.no_grad():
+             z = self.model.encode_image(img_x)
+         return z
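A brief usage sketch of the CLIP feature extractor above; the random image tensor is purely illustrative and assumes inputs already resized/cropped to 224x224 with values in [0, 255] on the same device as the model:

    fx = CLIP_fx("ViT-B/32", device="cuda")
    img_t = torch.randint(0, 256, (4, 3, 224, 224), device="cuda").float()  # [0, 255] range
    feats = fx(img_t)  # [4, 512] CLIP image embeddings for ViT-B/32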
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/downloads_helper.py ADDED
@@ -0,0 +1,73 @@
+ import os
+ import urllib.request
+ import requests
+ import shutil
+
+
+ inception_url = "https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt"
+
+
+ """
+ Download the pretrained inception weights if they do not exist
+ ARGS:
+     fpath - output folder path
+ """
+
+
+ def check_download_inception(fpath="./"):
+     inception_path = os.path.join(fpath, "inception-2015-12-05.pt")
+     if not os.path.exists(inception_path):
+         # download the file
+         with urllib.request.urlopen(inception_url) as response, open(inception_path, 'wb') as f:
+             shutil.copyfileobj(response, f)
+     return inception_path
+
+
+ """
+ Download any url if it does not exist
+ ARGS:
+     local_folder - output folder path
+     url - the weburl to download
+ """
+
+
+ def check_download_url(local_folder, url):
+     name = os.path.basename(url)
+     local_path = os.path.join(local_folder, name)
+     if not os.path.exists(local_path):
+         os.makedirs(local_folder, exist_ok=True)
+         print(f"downloading statistics to {local_path}")
+         with urllib.request.urlopen(url) as response, open(local_path, 'wb') as f:
+             shutil.copyfileobj(response, f)
+     return local_path
+
+
+ """
+ Download a file from google drive
+ ARGS:
+     file_id - id of the google drive file
+     out_path - output folder path
+ """
+
+
+ def download_google_drive(file_id, out_path):
+     def get_confirm_token(response):
+         for key, value in response.cookies.items():
+             if key.startswith('download_warning'):
+                 return value
+         return None
+
+     URL = "https://drive.google.com/uc?export=download"
+     session = requests.Session()
+     response = session.get(URL, params={'id': file_id}, stream=True)
+     token = get_confirm_token(response)
+
+     if token:
+         params = {'id': file_id, 'confirm': token}
+         response = session.get(URL, params=params, stream=True)
+
+     CHUNK_SIZE = 32768
+     with open(out_path, "wb") as f:
+         for chunk in response.iter_content(CHUNK_SIZE):
+             if chunk:
+                 f.write(chunk)
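A quick usage sketch of the download helpers above; the destination paths are hypothetical, and both helpers skip the download if the target file already exists:

    weights_path = check_download_inception(fpath="/tmp")   # -> /tmp/inception-2015-12-05.pt
    local_copy = check_download_url("/tmp/fid_cache", inception_url)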
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/features.py ADDED
@@ -0,0 +1,85 @@
+ """
+ helpers for extracting features from image
+ """
+ import os
+ import platform
+ import numpy as np
+ import torch
+ from causvid.evaluation.coco_eval import cleanfid
+ from causvid.evaluation.coco_eval.cleanfid.downloads_helper import check_download_url
+ from causvid.evaluation.coco_eval.cleanfid.inception_pytorch import InceptionV3
+ from causvid.evaluation.coco_eval.cleanfid.inception_torchscript import InceptionV3W
+
+
+ """
+ returns a function that takes an image in range [0,255]
+ and outputs a feature embedding vector
+ """
+
+
+ def feature_extractor(name="torchscript_inception", device=torch.device("cuda"), resize_inside=False, use_dataparallel=True):
+     if name == "torchscript_inception":
+         path = "./" if platform.system() == "Windows" else "/tmp"
+         model = InceptionV3W(path, download=True, resize_inside=resize_inside).to(device)
+         model.eval()
+         if use_dataparallel:
+             model = torch.nn.DataParallel(model)
+
+         def model_fn(x): return model(x)
+     elif name == "pytorch_inception":
+         model = InceptionV3(output_blocks=[3], resize_input=False).to(device)
+         model.eval()
+         if use_dataparallel:
+             model = torch.nn.DataParallel(model)
+
+         def model_fn(x): return model(x / 255)[0].squeeze(-1).squeeze(-1)
+     else:
+         raise ValueError(f"{name} feature extractor not implemented")
+     return model_fn
+
+
+ """
+ Build a feature extractor for each of the modes
+ """
+
+
+ def build_feature_extractor(mode, device=torch.device("cuda"), use_dataparallel=True):
+     if mode == "legacy_pytorch":
+         feat_model = feature_extractor(name="pytorch_inception", resize_inside=False, device=device, use_dataparallel=use_dataparallel)
+     elif mode == "legacy_tensorflow":
+         feat_model = feature_extractor(name="torchscript_inception", resize_inside=True, device=device, use_dataparallel=use_dataparallel)
+     elif mode == "clean":
+         feat_model = feature_extractor(name="torchscript_inception", resize_inside=False, device=device, use_dataparallel=use_dataparallel)
+     return feat_model
+
+
+ """
+ Load precomputed reference statistics for commonly used datasets
+ """
+
+
+ def get_reference_statistics(name, res, mode="clean", model_name="inception_v3", seed=0, split="test", metric="FID"):
+     base_url = "https://www.cs.cmu.edu/~clean-fid/stats/"
+     if split == "custom":
+         res = "na"
+     if model_name == "inception_v3":
+         model_modifier = ""
+     else:
+         model_modifier = "_" + model_name
+     if metric == "FID":
+         rel_path = (f"{name}_{mode}{model_modifier}_{split}_{res}.npz").lower()
+         url = f"{base_url}/{rel_path}"
+         mod_path = os.path.dirname(cleanfid.__file__)
+         stats_folder = os.path.join(mod_path, "stats")
+         fpath = check_download_url(local_folder=stats_folder, url=url)
+         stats = np.load(fpath)
+         mu, sigma = stats["mu"], stats["sigma"]
+         return mu, sigma
+     elif metric == "KID":
+         rel_path = (f"{name}_{mode}{model_modifier}_{split}_{res}_kid.npz").lower()
+         url = f"{base_url}/{rel_path}"
+         mod_path = os.path.dirname(cleanfid.__file__)
+         stats_folder = os.path.join(mod_path, "stats")
+         fpath = check_download_url(local_folder=stats_folder, url=url)
+         stats = np.load(fpath)
+         return stats["feats"]
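A short usage sketch of the extractor helpers above, assuming a CUDA device and inputs already resized to 299x299 in the [0, 255] range (as the clean-fid resizer would produce); the random batch is purely illustrative:

    feat_model = build_feature_extractor("clean", device=torch.device("cuda"), use_dataparallel=False)
    batch = torch.rand(8, 3, 299, 299, device="cuda") * 255   # images in [0, 255]
    feats = feat_model(batch)                                  # [8, 2048] Inception features used for FID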
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/fid.py ADDED
@@ -0,0 +1,635 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ import os
2
+ import random
3
+ from tqdm import tqdm
4
+ from glob import glob
5
+ import torch
6
+ import numpy as np
7
+ from PIL import Image
8
+ from scipy import linalg
9
+ import zipfile
10
+ from causvid.evaluation.coco_eval import cleanfid
11
+ from causvid.evaluation.coco_eval.cleanfid.utils import *
12
+ from causvid.evaluation.coco_eval.cleanfid.features import build_feature_extractor, get_reference_statistics
13
+ from causvid.evaluation.coco_eval.cleanfid.resize import *
14
+
15
+
16
+ """
17
+ Numpy implementation of the Frechet Distance.
18
+ The Frechet distance between two multivariate Gaussians X_1 ~ N(mu_1, C_1)
19
+ and X_2 ~ N(mu_2, C_2) is
20
+ d^2 = ||mu_1 - mu_2||^2 + Tr(C_1 + C_2 - 2*sqrt(C_1*C_2)).
21
+ Stable version by Danica J. Sutherland.
22
+ Params:
23
+ mu1 : Numpy array containing the activations of a layer of the
24
+ inception net (like returned by the function 'get_predictions')
25
+ for generated samples.
26
+ mu2 : The sample mean over activations, precalculated on an
27
+ representative data set.
28
+ sigma1: The covariance matrix over activations for generated samples.
29
+ sigma2: The covariance matrix over activations, precalculated on an
30
+ representative data set.
31
+ """
32
+
33
+
34
+ def frechet_distance(mu1, sigma1, mu2, sigma2, eps=1e-6):
35
+ mu1 = np.atleast_1d(mu1)
36
+ mu2 = np.atleast_1d(mu2)
37
+ sigma1 = np.atleast_2d(sigma1)
38
+ sigma2 = np.atleast_2d(sigma2)
39
+
40
+ assert mu1.shape == mu2.shape, \
41
+ 'Training and test mean vectors have different lengths'
42
+ assert sigma1.shape == sigma2.shape, \
43
+ 'Training and test covariances have different dimensions'
44
+
45
+ diff = mu1 - mu2
46
+
47
+ # Product might be almost singular
48
+ covmean, _ = linalg.sqrtm(sigma1.dot(sigma2), disp=False)
49
+ if not np.isfinite(covmean).all():
50
+ msg = ('fid calculation produces singular product; '
51
+ 'adding %s to diagonal of cov estimates') % eps
52
+ print(msg)
53
+ offset = np.eye(sigma1.shape[0]) * eps
54
+ covmean = linalg.sqrtm((sigma1 + offset).dot(sigma2 + offset))
55
+
56
+ # Numerical error might give slight imaginary component
57
+ if np.iscomplexobj(covmean):
58
+ if not np.allclose(np.diagonal(covmean).imag, 0, atol=1e-3):
59
+ m = np.max(np.abs(covmean.imag))
60
+ raise ValueError('Imaginary component {}'.format(m))
61
+ covmean = covmean.real
62
+
63
+ tr_covmean = np.trace(covmean)
64
+
65
+ return (diff.dot(diff) + np.trace(sigma1) + np.trace(sigma2) - 2 * tr_covmean)
66
+
67
+
68
+ """
69
+ Compute the KID score given the sets of features
70
+ """
71
+
72
+
73
+ def kernel_distance(feats1, feats2, num_subsets=100, max_subset_size=1000):
74
+ n = feats1.shape[1]
75
+ m = min(min(feats1.shape[0], feats2.shape[0]), max_subset_size)
76
+ t = 0
77
+ for _subset_idx in range(num_subsets):
78
+ x = feats2[np.random.choice(feats2.shape[0], m, replace=False)]
79
+ y = feats1[np.random.choice(feats1.shape[0], m, replace=False)]
80
+ a = (x @ x.T / n + 1) ** 3 + (y @ y.T / n + 1) ** 3
81
+ b = (x @ y.T / n + 1) ** 3
82
+ t += (a.sum() - np.diag(a).sum()) / (m - 1) - b.sum() * 2 / m
83
+ kid = t / num_subsets / m
84
+ return float(kid)
85
+
86
+
87
+ """
88
+ Compute the inception features for a batch of images
89
+ """
90
+
91
+
92
+ def get_batch_features(batch, model, device):
93
+ with torch.no_grad():
94
+ feat = model(batch.to(device))
95
+ return feat.detach().cpu().numpy()
96
+
97
+
98
+ """
99
+ Compute the inception features for a list of files
100
+ """
101
+
102
+
103
+ def get_files_features(l_files, model=None, num_workers=12,
104
+ batch_size=128, device=torch.device("cuda"),
105
+ mode="clean", custom_fn_resize=None,
106
+ description="", fdir=None, verbose=True,
107
+ custom_image_tranform=None):
108
+ # wrap the images in a dataloader for parallelizing the resize operation
109
+ dataset = ResizeDataset(l_files, fdir=fdir, mode=mode)
110
+ if custom_image_tranform is not None:
111
+ dataset.custom_image_tranform = custom_image_tranform
112
+ if custom_fn_resize is not None:
113
+ dataset.fn_resize = custom_fn_resize
114
+
115
+ dataloader = torch.utils.data.DataLoader(dataset,
116
+ batch_size=batch_size, shuffle=False,
117
+ drop_last=False, num_workers=num_workers)
118
+
119
+ # collect all inception features
120
+ l_feats = []
121
+ if verbose:
122
+ pbar = tqdm(dataloader, desc=description)
123
+ else:
124
+ pbar = dataloader
125
+
126
+ for batch in pbar:
127
+ l_feats.append(get_batch_features(batch, model, device))
128
+ np_feats = np.concatenate(l_feats)
129
+ return np_feats
130
+
131
+
132
+ """
133
+ Compute the inception features for a numpy array
134
+ """
135
+
136
+
137
+ def get_array_features(l_array, model=None, num_workers=12,
138
+ batch_size=128, device=torch.device("cuda"),
139
+ mode="clean", custom_fn_resize=None,
140
+ description="", verbose=True,
141
+ custom_image_tranform=None):
142
+ # wrap the images in a dataloader for parallelizing the resize operation
143
+ dataset = ResizeArrayDataset(l_array, mode=mode)
144
+ if custom_image_tranform is not None:
145
+ dataset.custom_image_tranform = custom_image_tranform
146
+ if custom_fn_resize is not None:
147
+ dataset.fn_resize = custom_fn_resize
148
+
149
+ dataloader = torch.utils.data.DataLoader(dataset,
150
+ batch_size=batch_size, shuffle=False,
151
+ drop_last=False, num_workers=num_workers)
152
+
153
+ # collect all inception features
154
+ l_feats = []
155
+ if verbose:
156
+ pbar = tqdm(dataloader, desc=description)
157
+ else:
158
+ pbar = dataloader
159
+
160
+ for batch in pbar:
161
+ l_feats.append(get_batch_features(batch, model, device))
162
+ np_feats = np.concatenate(l_feats)
163
+ return np_feats
164
+
165
+
166
+ """
167
+ Compute the inception features for a folder of image files
168
+ """
169
+
170
+
171
+ def get_folder_features(fdir, model=None, num_workers=12, num=None,
172
+ shuffle=False, seed=0, batch_size=128, device=torch.device("cuda"),
173
+ mode="clean", custom_fn_resize=None, description="", verbose=True,
174
+ custom_image_tranform=None):
175
+ # get all relevant files in the dataset
176
+ if ".zip" in fdir:
177
+ files = list(set(zipfile.ZipFile(fdir).namelist()))
178
+ # remove the non-image files inside the zip
179
+ files = [x for x in files if os.path.splitext(x)[1].lower()[
180
+ 1:] in EXTENSIONS]
181
+ else:
182
+ files = sorted([file for ext in EXTENSIONS
183
+ for file in glob(os.path.join(fdir, f"**/*.{ext}"), recursive=True)])
184
+ if verbose:
185
+ print(f"Found {len(files)} images in the folder {fdir}")
186
+ # use a subset number of files if needed
187
+ if num is not None:
188
+ if shuffle:
189
+ random.seed(seed)
190
+ random.shuffle(files)
191
+ files = files[:num]
192
+ np_feats = get_files_features(files, model, num_workers=num_workers,
193
+ batch_size=batch_size, device=device, mode=mode,
194
+ custom_fn_resize=custom_fn_resize,
195
+ custom_image_tranform=custom_image_tranform,
196
+ description=description, fdir=fdir, verbose=verbose)
197
+ return np_feats
198
+
199
+
200
+ """
201
+ Compute the FID score given the inception features stack
202
+ """
203
+
204
+
205
+ def fid_from_feats(feats1, feats2):
206
+ mu1, sig1 = np.mean(feats1, axis=0), np.cov(feats1, rowvar=False)
207
+ mu2, sig2 = np.mean(feats2, axis=0), np.cov(feats2, rowvar=False)
208
+ return frechet_distance(mu1, sig1, mu2, sig2)
209
+
210
+
211
+ """
212
+ Computes the FID score for a folder of images for a specific dataset
213
+ and a specific resolution
214
+ """
215
+
216
+
217
+ def fid_folder(fdir, dataset_name, dataset_res, dataset_split,
218
+ model=None, mode="clean", model_name="inception_v3", num_workers=12,
219
+ batch_size=128, device=torch.device("cuda"), verbose=True,
220
+ custom_image_tranform=None, custom_fn_resize=None):
221
+ # Load reference FID statistics (download if needed)
222
+ ref_mu, ref_sigma = get_reference_statistics(dataset_name, dataset_res,
223
+ mode=mode, model_name=model_name, seed=0, split=dataset_split)
224
+ fbname = os.path.basename(fdir)
225
+ # get all inception features for folder images
226
+ np_feats = get_folder_features(fdir, model, num_workers=num_workers,
227
+ batch_size=batch_size, device=device,
228
+ mode=mode, description=f"FID {fbname} : ", verbose=verbose,
229
+ custom_image_tranform=custom_image_tranform,
230
+ custom_fn_resize=custom_fn_resize)
231
+ mu = np.mean(np_feats, axis=0)
232
+ sigma = np.cov(np_feats, rowvar=False)
233
+ fid = frechet_distance(mu, sigma, ref_mu, ref_sigma)
234
+ return fid
235
+
236
+
237
+ """
238
+ Compute the FID stats from a generator model
239
+ """
240
+
241
+
242
+ def get_model_features(G, model, mode="clean", z_dim=512,
243
+ num_gen=50_000, batch_size=128, device=torch.device("cuda"),
244
+ desc="FID model: ", verbose=True, return_z=False,
245
+ custom_image_tranform=None, custom_fn_resize=None):
246
+ if custom_fn_resize is None:
247
+ fn_resize = build_resizer(mode)
248
+ else:
249
+ fn_resize = custom_fn_resize
250
+
251
+ # Generate test features
252
+ num_iters = int(np.ceil(num_gen / batch_size))
253
+ l_feats = []
254
+ latents = []
255
+ if verbose:
256
+ pbar = tqdm(range(num_iters), desc=desc)
257
+ else:
258
+ pbar = range(num_iters)
259
+ for idx in pbar:
260
+ with torch.no_grad():
261
+ z_batch = torch.randn((batch_size, z_dim)).to(device)
262
+ if return_z:
263
+ latents.append(z_batch)
264
+ # generated image is in range [0,255]
265
+ img_batch = G(z_batch)
266
+ # split into individual batches for resizing if needed
267
+ if mode != "legacy_tensorflow":
268
+ l_resized_batch = []
269
+ for idx in range(batch_size):
270
+ curr_img = img_batch[idx]
271
+ img_np = curr_img.cpu().numpy().transpose((1, 2, 0))
272
+ if custom_image_tranform is not None:
273
+ img_np = custom_image_tranform(img_np)
274
+ img_resize = fn_resize(img_np)
275
+ l_resized_batch.append(torch.tensor(
276
+ img_resize.transpose((2, 0, 1))).unsqueeze(0))
277
+ resized_batch = torch.cat(l_resized_batch, dim=0)
278
+ else:
279
+ resized_batch = img_batch
280
+ feat = get_batch_features(resized_batch, model, device)
281
+ l_feats.append(feat)
282
+ np_feats = np.concatenate(l_feats)[:num_gen]
283
+ if return_z:
284
+ latents = torch.cat(latents, 0)
285
+ return np_feats, latents
286
+ return np_feats
287
+
288
+
289
+ """
290
+ Computes the FID score for a generator model for a specific dataset
291
+ and a specific resolution
292
+ """
293
+
294
+
295
+ def fid_model(G, dataset_name, dataset_res, dataset_split,
296
+ model=None, model_name="inception_v3", z_dim=512, num_gen=50_000,
297
+ mode="clean", num_workers=0, batch_size=128,
298
+ device=torch.device("cuda"), verbose=True,
299
+ custom_image_tranform=None, custom_fn_resize=None):
300
+ # Load reference FID statistics (download if needed)
301
+ ref_mu, ref_sigma = get_reference_statistics(dataset_name, dataset_res,
302
+ mode=mode, model_name=model_name,
303
+ seed=0, split=dataset_split)
304
+ # Generate features of images generated by the model
305
+ np_feats = get_model_features(G, model, mode=mode,
306
+ z_dim=z_dim, num_gen=num_gen,
307
+ batch_size=batch_size, device=device, verbose=verbose,
308
+ custom_image_tranform=custom_image_tranform, custom_fn_resize=custom_fn_resize)
309
+ mu = np.mean(np_feats, axis=0)
310
+ sigma = np.cov(np_feats, rowvar=False)
311
+ fid = frechet_distance(mu, sigma, ref_mu, ref_sigma)
312
+ return fid
313
+
314
+
315
+ """
316
+ Computes the FID score between the two given folders
317
+ """
318
+
319
+
320
+ def compare_folders(fdir1, fdir2, feat_model, mode, num_workers=0,
321
+ batch_size=8, device=torch.device("cuda"), verbose=True,
322
+ custom_image_tranform=None, custom_fn_resize=None):
323
+ # get all inception features for the first folder
324
+ fbname1 = os.path.basename(fdir1)
325
+ np_feats1 = get_folder_features(fdir1, feat_model, num_workers=num_workers,
326
+ batch_size=batch_size, device=device, mode=mode,
327
+ description=f"FID {fbname1} : ", verbose=verbose,
328
+ custom_image_tranform=custom_image_tranform,
329
+ custom_fn_resize=custom_fn_resize)
330
+ mu1 = np.mean(np_feats1, axis=0)
331
+ sigma1 = np.cov(np_feats1, rowvar=False)
332
+ # get all inception features for the second folder
333
+ fbname2 = os.path.basename(fdir2)
334
+ np_feats2 = get_folder_features(fdir2, feat_model, num_workers=num_workers,
335
+ batch_size=batch_size, device=device, mode=mode,
336
+ description=f"FID {fbname2} : ", verbose=verbose,
337
+ custom_image_tranform=custom_image_tranform,
338
+ custom_fn_resize=custom_fn_resize)
339
+ mu2 = np.mean(np_feats2, axis=0)
340
+ sigma2 = np.cov(np_feats2, rowvar=False)
341
+ fid = frechet_distance(mu1, sigma1, mu2, sigma2)
342
+ return fid
343
+
344
+
345
+ """
346
+ Test if a custom statistic exists
347
+ """
348
+
349
+
350
+ def test_stats_exists(name, mode, model_name="inception_v3", metric="FID"):
351
+ stats_folder = os.path.join(os.path.dirname(cleanfid.__file__), "stats")
352
+ split, res = "custom", "na"
353
+ if model_name == "inception_v3":
354
+ model_modifier = ""
355
+ else:
356
+ model_modifier = "_" + model_name
357
+ if metric == "FID":
358
+ fname = f"{name}_{mode}{model_modifier}_{split}_{res}.npz"
359
+ elif metric == "KID":
360
+ fname = f"{name}_{mode}{model_modifier}_{split}_{res}_kid.npz"
361
+ fpath = os.path.join(stats_folder, fname)
362
+ return os.path.exists(fpath)
363
+
364
+
365
+ """
366
+ Remove the custom FID features from the stats folder
367
+ """
368
+
369
+
370
+ def remove_custom_stats(name, mode="clean", model_name="inception_v3"):
371
+ stats_folder = os.path.join(os.path.dirname(cleanfid.__file__), "stats")
372
+ # remove the FID stats
373
+ split, res = "custom", "na"
374
+ if model_name == "inception_v3":
375
+ model_modifier = ""
376
+ else:
377
+ model_modifier = "_" + model_name
378
+ outf = os.path.join(
379
+ stats_folder, f"{name}_{mode}{model_modifier}_{split}_{res}.npz".lower())
380
+ if not os.path.exists(outf):
381
+ msg = f"The stats file {name} does not exist."
382
+ raise Exception(msg)
383
+ os.remove(outf)
384
+ # remove the KID stats
385
+ outf = os.path.join(
386
+ stats_folder, f"{name}_{mode}{model_modifier}_{split}_{res}_kid.npz")
387
+ if not os.path.exists(outf):
388
+ msg = f"The stats file {name} does not exist."
389
+ raise Exception(msg)
390
+ os.remove(outf)
391
+
392
+
393
+ """
394
+ Cache a custom dataset statistics file
395
+ """
396
+
397
+
398
+ def make_custom_stats(name, fdir, num=None, mode="clean", model_name="inception_v3",
399
+ num_workers=0, batch_size=64, device=torch.device("cuda"), verbose=True):
400
+ stats_folder = os.path.join(os.path.dirname(cleanfid.__file__), "stats")
401
+ os.makedirs(stats_folder, exist_ok=True)
402
+ split, res = "custom", "na"
403
+ if model_name == "inception_v3":
404
+ model_modifier = ""
405
+ else:
406
+ model_modifier = "_" + model_name
407
+ outf = os.path.join(
408
+ stats_folder, f"{name}_{mode}{model_modifier}_{split}_{res}.npz".lower())
409
+ # if the custom stat file already exists
410
+ if os.path.exists(outf):
411
+ msg = f"The statistics file {name} already exists. "
412
+ msg += "Use remove_custom_stats function to delete it first."
413
+ raise Exception(msg)
414
+ if model_name == "inception_v3":
415
+ feat_model = build_feature_extractor(mode, device)
416
+ custom_fn_resize = None
417
+ custom_image_tranform = None
418
+ elif model_name == "clip_vit_b_32":
419
+ from causvid.evaluation.coco_eval.cleanfid.clip_features import CLIP_fx, img_preprocess_clip
420
+ clip_fx = CLIP_fx("ViT-B/32")
421
+ feat_model = clip_fx
422
+ custom_fn_resize = img_preprocess_clip
423
+ custom_image_tranform = None
424
+ else:
425
+ raise ValueError(
426
+ f"The entered model name - {model_name} was not recognized.")
427
+
428
+ # get all inception features for folder images
429
+ np_feats = get_folder_features(fdir, feat_model, num_workers=num_workers, num=num,
430
+ batch_size=batch_size, device=device, verbose=verbose,
431
+ mode=mode, description=f"custom stats: {os.path.basename(fdir)} : ",
432
+ custom_image_tranform=custom_image_tranform,
433
+ custom_fn_resize=custom_fn_resize)
434
+
435
+ mu = np.mean(np_feats, axis=0)
436
+ sigma = np.cov(np_feats, rowvar=False)
437
+ print(f"saving custom FID stats to {outf}")
438
+ np.savez_compressed(outf, mu=mu, sigma=sigma)
439
+
440
+ # KID stats
441
+ outf = os.path.join(
442
+ stats_folder, f"{name}_{mode}{model_modifier}_{split}_{res}_kid.npz".lower())
443
+ print(f"saving custom KID stats to {outf}")
444
+ np.savez_compressed(outf, feats=np_feats)
445
+
446
+
447
+ def compute_kid(fdir1=None, fdir2=None, gen=None,
448
+ mode="clean", num_workers=12, batch_size=32,
449
+ device=torch.device("cuda"), dataset_name="FFHQ",
450
+ dataset_res=1024, dataset_split="train", num_gen=50_000, z_dim=512,
451
+ verbose=True, use_dataparallel=True):
452
+ # build the feature extractor based on the mode
453
+ feat_model = build_feature_extractor(
454
+ mode, device, use_dataparallel=use_dataparallel)
455
+
456
+ # if both dirs are specified, compute KID between folders
457
+ if fdir1 is not None and fdir2 is not None:
458
+ if verbose:
459
+ print("compute KID between two folders")
460
+ # get all inception features for the first folder
461
+ fbname1 = os.path.basename(fdir1)
462
+ np_feats1 = get_folder_features(fdir1, feat_model, num_workers=num_workers,
463
+ batch_size=batch_size, device=device, mode=mode,
464
+ description=f"KID {fbname1} : ", verbose=verbose)
465
+ # get all inception features for the second folder
466
+ fbname2 = os.path.basename(fdir2)
467
+ np_feats2 = get_folder_features(fdir2, feat_model, num_workers=num_workers,
468
+ batch_size=batch_size, device=device, mode=mode,
469
+ description=f"KID {fbname2} : ", verbose=verbose)
470
+ score = kernel_distance(np_feats1, np_feats2)
471
+ return score
472
+
473
+ # compute kid of a folder
474
+ elif fdir1 is not None and fdir2 is None:
475
+ if verbose:
476
+ print(f"compute KID of a folder with {dataset_name} statistics")
477
+ ref_feats = get_reference_statistics(dataset_name, dataset_res,
478
+ mode=mode, seed=0, split=dataset_split, metric="KID")
479
+ fbname = os.path.basename(fdir1)
480
+ # get all inception features for folder images
481
+ np_feats = get_folder_features(fdir1, feat_model, num_workers=num_workers,
482
+ batch_size=batch_size, device=device, mode=mode,
483
+ description=f"KID {fbname} : ", verbose=verbose)
484
+ score = kernel_distance(ref_feats, np_feats)
485
+ return score
486
+
487
+ # compute kid for a generator, using images in fdir2
488
+ elif gen is not None and fdir2 is not None:
489
+ if verbose:
490
+ print(f"compute KID of a model, using references in fdir2")
491
+ # get all inception features for the second folder
492
+ fbname2 = os.path.basename(fdir2)
493
+ ref_feats = get_folder_features(fdir2, feat_model, num_workers=num_workers,
494
+ batch_size=batch_size, device=device, mode=mode,
495
+ description=f"KID {fbname2} : ")
496
+ # Generate test features
497
+ np_feats = get_model_features(gen, feat_model, mode=mode,
498
+ z_dim=z_dim, num_gen=num_gen, desc="KID model: ",
499
+ batch_size=batch_size, device=device)
500
+ score = kernel_distance(ref_feats, np_feats)
501
+ return score
502
+
503
+ # compute fid for a generator, using reference statistics
504
+ elif gen is not None:
505
+ if verbose:
506
+ print(
507
+ f"compute KID of a model with {dataset_name}-{dataset_res} statistics")
508
+ ref_feats = get_reference_statistics(dataset_name, dataset_res,
509
+ mode=mode, seed=0, split=dataset_split, metric="KID")
510
+ # Generate test features
511
+ np_feats = get_model_features(gen, feat_model, mode=mode,
512
+ z_dim=z_dim, num_gen=num_gen, desc="KID model: ",
513
+ batch_size=batch_size, device=device, verbose=verbose)
514
+ score = kernel_distance(ref_feats, np_feats)
515
+ return score
516
+
517
+ else:
518
+ raise ValueError(
519
+ "invalid combination of directories and models entered")
520
+
521
+
522
+ """
523
+ custom_image_tranform:
524
+ function that takes an np_array image as input [0,255] and
525
+ applies a custom transform such as cropping
526
+ """
527
+
528
+
529
+ def compute_fid(fdir1=None, fdir2=None, gen=None,
530
+ mode="clean", model_name="inception_v3", num_workers=12,
531
+ batch_size=32, device=torch.device("cuda"), dataset_name="FFHQ",
532
+ dataset_res=1024, dataset_split="train", num_gen=50_000, z_dim=512,
533
+ custom_feat_extractor=None, verbose=True,
534
+ custom_image_tranform=None, custom_fn_resize=None,
535
+ use_dataparallel=True, pred_arr=None
536
+ ):
537
+ # build the feature extractor based on the mode and the model to be used
538
+ if custom_feat_extractor is None and model_name == "inception_v3":
539
+ feat_model = build_feature_extractor(
540
+ mode, device, use_dataparallel=use_dataparallel)
541
+ elif custom_feat_extractor is None and model_name == "clip_vit_b_32":
542
+ from causvid.evaluation.coco_eval.cleanfid.clip_features import CLIP_fx, img_preprocess_clip
543
+ clip_fx = CLIP_fx("ViT-B/32", device=device)
544
+ feat_model = clip_fx
545
+ custom_fn_resize = img_preprocess_clip
546
+ else:
547
+ feat_model = custom_feat_extractor
548
+
549
+ # if both dirs are specified, compute FID between folders
550
+ if fdir1 is not None and fdir2 is not None:
551
+ if verbose:
552
+ print("compute FID between two folders")
553
+ score = compare_folders(fdir1, fdir2, feat_model,
554
+ mode=mode, batch_size=batch_size,
555
+ num_workers=num_workers, device=device,
556
+ custom_image_tranform=custom_image_tranform,
557
+ custom_fn_resize=custom_fn_resize,
558
+ verbose=verbose)
559
+ return score
560
+
561
+ # compute fid of a folder
562
+ elif fdir1 is not None and fdir2 is None:
563
+ if verbose:
564
+ print(f"compute FID of a folder with {dataset_name} statistics")
565
+ score = fid_folder(fdir1, dataset_name, dataset_res, dataset_split,
566
+ model=feat_model, mode=mode, model_name=model_name,
567
+ custom_fn_resize=custom_fn_resize, custom_image_tranform=custom_image_tranform,
568
+ num_workers=num_workers, batch_size=batch_size, device=device, verbose=verbose)
569
+ return score
570
+
571
+ # compute fid for a generator, using images in fdir2
572
+ elif gen is not None and fdir2 is not None:
573
+ if verbose:
574
+ print(f"compute FID of a model, using references in fdir2")
575
+ # get all inception features for the second folder
576
+ fbname2 = os.path.basename(fdir2)
577
+ np_feats2 = get_folder_features(fdir2, feat_model, num_workers=num_workers,
578
+ batch_size=batch_size, device=device, mode=mode,
579
+ description=f"FID {fbname2} : ", verbose=verbose,
580
+ custom_fn_resize=custom_fn_resize,
581
+ custom_image_tranform=custom_image_tranform)
582
+ mu2 = np.mean(np_feats2, axis=0)
583
+ sigma2 = np.cov(np_feats2, rowvar=False)
584
+ # Generate test features
585
+ np_feats = get_model_features(gen, feat_model, mode=mode,
586
+ z_dim=z_dim, num_gen=num_gen,
587
+ custom_fn_resize=custom_fn_resize,
588
+ custom_image_tranform=custom_image_tranform,
589
+ batch_size=batch_size, device=device, verbose=verbose)
590
+
591
+ mu = np.mean(np_feats, axis=0)
592
+ sigma = np.cov(np_feats, rowvar=False)
593
+ fid = frechet_distance(mu, sigma, mu2, sigma2)
594
+ return fid
595
+
596
+ # compute fid for a generator, using reference statistics
597
+ elif gen is not None:
598
+ if verbose:
599
+ print(
600
+ f"compute FID of a model with {dataset_name}-{dataset_res} statistics")
601
+ score = fid_model(gen, dataset_name, dataset_res, dataset_split,
602
+ model=feat_model, model_name=model_name, z_dim=z_dim, num_gen=num_gen,
603
+ mode=mode, num_workers=num_workers, batch_size=batch_size,
604
+ custom_image_tranform=custom_image_tranform, custom_fn_resize=custom_fn_resize,
605
+ device=device, verbose=verbose)
606
+ return score
607
+
608
+ elif pred_arr is not None:
609
+ if verbose:
610
+ print(f"compute FID of a model, using references in fdir2")
611
+ # get all inception features for the second folder
612
+ fbname2 = os.path.basename(fdir2)
613
+ np_feats2 = get_folder_features(fdir2, feat_model, num_workers=num_workers,
614
+ batch_size=batch_size, device=device, mode=mode,
615
+ description=f"FID {fbname2} : ", verbose=verbose,
616
+ custom_fn_resize=custom_fn_resize,
617
+ custom_image_tranform=custom_image_tranform)
618
+ mu2 = np.mean(np_feats2, axis=0)
619
+ sigma2 = np.cov(np_feats2, rowvar=False)
620
+
621
+ # compute FID statistics using the numpy array
622
+ np_feats = get_array_features(
623
+ pred_arr, model=feat_model, num_workers=num_workers,
624
+ batch_size=batch_size, device=device, mode=mode,
625
+ custom_fn_resize=custom_fn_resize,
626
+ custom_image_tranform=custom_image_tranform
627
+ )
628
+ mu = np.mean(np_feats, axis=0)
629
+ sigma = np.cov(np_feats, rowvar=False)
630
+ fid = frechet_distance(mu, sigma, mu2, sigma2)
631
+ return fid
632
+ # return fid, np_feats, np_feats2
633
+ else:
634
+ raise ValueError(
635
+ "invalid combination of directories and models entered")
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/inception_pytorch.py ADDED
@@ -0,0 +1,332 @@
1
+ """
2
+ File from: https://github.com/mseitzer/pytorch-fid
3
+ """
4
+
5
+
6
+ import torch
7
+ import torch.nn as nn
8
+ import torch.nn.functional as F
9
+ import torchvision
10
+
11
+ try:
12
+ from torchvision.models.utils import load_state_dict_from_url
13
+ except ImportError:
14
+ from torch.utils.model_zoo import load_url as load_state_dict_from_url
15
+
16
+ # Inception weights ported to Pytorch from
17
+ # http://download.tensorflow.org/models/image/imagenet/inception-2015-12-05.tgz
18
+ FID_WEIGHTS_URL = 'https://github.com/mseitzer/pytorch-fid/releases/download/fid_weights/pt_inception-2015-12-05-6726825d.pth' # noqa: E501
19
+
20
+
21
+ class InceptionV3(nn.Module):
22
+ """Pretrained InceptionV3 network returning feature maps"""
23
+
24
+ # Index of default block of inception to return,
25
+ # corresponds to output of final average pooling
26
+ DEFAULT_BLOCK_INDEX = 3
27
+
28
+ # Maps feature dimensionality to their output blocks indices
29
+ BLOCK_INDEX_BY_DIM = {
30
+ 64: 0, # First max pooling features
31
+ 192: 1, # Second max pooling features
32
+ 768: 2, # Pre-aux classifier features
33
+ 2048: 3 # Final average pooling features
34
+ }
35
+
36
+ def __init__(self,
37
+ output_blocks=(DEFAULT_BLOCK_INDEX,),
38
+ resize_input=True,
39
+ normalize_input=True,
40
+ requires_grad=False,
41
+ use_fid_inception=True):
42
+ """Build pretrained InceptionV3
43
+ Parameters
44
+ ----------
45
+ output_blocks : list of int
46
+ Indices of blocks to return features of. Possible values are:
47
+ - 0: corresponds to output of first max pooling
48
+ - 1: corresponds to output of second max pooling
49
+ - 2: corresponds to output which is fed to aux classifier
50
+ - 3: corresponds to output of final average pooling
51
+ resize_input : bool
52
+ If true, bilinearly resizes input to width and height 299 before
53
+ feeding input to model. As the network without fully connected
54
+ layers is fully convolutional, it should be able to handle inputs
55
+ of arbitrary size, so resizing might not be strictly needed
56
+ normalize_input : bool
57
+ If true, scales the input from range (0, 1) to the range the
58
+ pretrained Inception network expects, namely (-1, 1)
59
+ requires_grad : bool
60
+ If true, parameters of the model require gradients. Possibly useful
61
+ for finetuning the network
62
+ use_fid_inception : bool
63
+ If true, uses the pretrained Inception model used in Tensorflow's
64
+ FID implementation. If false, uses the pretrained Inception model
65
+ available in torchvision. The FID Inception model has different
66
+ weights and a slightly different structure from torchvision's
67
+ Inception model. If you want to compute FID scores, you are
68
+ strongly advised to set this parameter to true to get comparable
69
+ results.
70
+ """
71
+ super(InceptionV3, self).__init__()
72
+
73
+ self.resize_input = resize_input
74
+ self.normalize_input = normalize_input
75
+ self.output_blocks = sorted(output_blocks)
76
+ self.last_needed_block = max(output_blocks)
77
+
78
+ assert self.last_needed_block <= 3, \
79
+ 'Last possible output block index is 3'
80
+
81
+ self.blocks = nn.ModuleList()
82
+
83
+ if use_fid_inception:
84
+ inception = fid_inception_v3()
85
+ else:
86
+ inception = _inception_v3(pretrained=True)
87
+
88
+ # Block 0: input to maxpool1
89
+ block0 = [
90
+ inception.Conv2d_1a_3x3,
91
+ inception.Conv2d_2a_3x3,
92
+ inception.Conv2d_2b_3x3,
93
+ nn.MaxPool2d(kernel_size=3, stride=2)
94
+ ]
95
+ self.blocks.append(nn.Sequential(*block0))
96
+
97
+ # Block 1: maxpool1 to maxpool2
98
+ if self.last_needed_block >= 1:
99
+ block1 = [
100
+ inception.Conv2d_3b_1x1,
101
+ inception.Conv2d_4a_3x3,
102
+ nn.MaxPool2d(kernel_size=3, stride=2)
103
+ ]
104
+ self.blocks.append(nn.Sequential(*block1))
105
+
106
+ # Block 2: maxpool2 to aux classifier
107
+ if self.last_needed_block >= 2:
108
+ block2 = [
109
+ inception.Mixed_5b,
110
+ inception.Mixed_5c,
111
+ inception.Mixed_5d,
112
+ inception.Mixed_6a,
113
+ inception.Mixed_6b,
114
+ inception.Mixed_6c,
115
+ inception.Mixed_6d,
116
+ inception.Mixed_6e,
117
+ ]
118
+ self.blocks.append(nn.Sequential(*block2))
119
+
120
+ # Block 3: aux classifier to final avgpool
121
+ if self.last_needed_block >= 3:
122
+ block3 = [
123
+ inception.Mixed_7a,
124
+ inception.Mixed_7b,
125
+ inception.Mixed_7c,
126
+ nn.AdaptiveAvgPool2d(output_size=(1, 1))
127
+ ]
128
+ self.blocks.append(nn.Sequential(*block3))
129
+
130
+ for param in self.parameters():
131
+ param.requires_grad = requires_grad
132
+
133
+ def forward(self, inp):
134
+ """Get Inception feature maps
135
+ Parameters
136
+ ----------
137
+ inp : torch.autograd.Variable
138
+ Input tensor of shape Bx3xHxW. Values are expected to be in
139
+ range (0, 1)
140
+ Returns
141
+ -------
142
+ List of torch.autograd.Variable, corresponding to the selected output
143
+ block, sorted ascending by index
144
+ """
145
+ outp = []
146
+ x = inp
147
+
148
+ if self.resize_input:
149
+ raise ValueError("should not resize here")
150
+ x = F.interpolate(x,
151
+ size=(299, 299),
152
+ mode='bilinear',
153
+ align_corners=False)
154
+
155
+ if self.normalize_input:
156
+ x = 2 * x - 1 # Scale from range (0, 1) to range (-1, 1)
157
+
158
+ for idx, block in enumerate(self.blocks):
159
+ x = block(x)
160
+ if idx in self.output_blocks:
161
+ outp.append(x)
162
+
163
+ if idx == self.last_needed_block:
164
+ break
165
+
166
+ return outp
167
+
168
+
169
+ def _inception_v3(*args, **kwargs):
170
+ """Wraps `torchvision.models.inception_v3`
171
+ Skips default weight initialization if supported by torchvision version.
172
+ See https://github.com/mseitzer/pytorch-fid/issues/28.
173
+ """
174
+ try:
175
+ version = tuple(map(int, torchvision.__version__.split('.')[:2]))
176
+ except ValueError:
177
+ # Just a caution against weird version strings
178
+ version = (0,)
179
+
180
+ if version >= (0, 6):
181
+ kwargs['init_weights'] = False
182
+
183
+ return torchvision.models.inception_v3(*args, **kwargs)
184
+
185
+
186
+ def fid_inception_v3():
187
+ """Build pretrained Inception model for FID computation
188
+ The Inception model for FID computation uses a different set of weights
189
+ and has a slightly different structure than torchvision's Inception.
190
+ This method first constructs torchvision's Inception and then patches the
191
+ necessary parts that are different in the FID Inception model.
192
+ """
193
+ inception = _inception_v3(num_classes=1008,
194
+ aux_logits=False,
195
+ pretrained=False)
196
+ inception.Mixed_5b = FIDInceptionA(192, pool_features=32)
197
+ inception.Mixed_5c = FIDInceptionA(256, pool_features=64)
198
+ inception.Mixed_5d = FIDInceptionA(288, pool_features=64)
199
+ inception.Mixed_6b = FIDInceptionC(768, channels_7x7=128)
200
+ inception.Mixed_6c = FIDInceptionC(768, channels_7x7=160)
201
+ inception.Mixed_6d = FIDInceptionC(768, channels_7x7=160)
202
+ inception.Mixed_6e = FIDInceptionC(768, channels_7x7=192)
203
+ inception.Mixed_7b = FIDInceptionE_1(1280)
204
+ inception.Mixed_7c = FIDInceptionE_2(2048)
205
+
206
+ state_dict = load_state_dict_from_url(FID_WEIGHTS_URL, progress=False)
207
+ inception.load_state_dict(state_dict)
208
+ return inception
209
+
210
+
211
+ class FIDInceptionA(torchvision.models.inception.InceptionA):
212
+ """InceptionA block patched for FID computation"""
213
+
214
+ def __init__(self, in_channels, pool_features):
215
+ super(FIDInceptionA, self).__init__(in_channels, pool_features)
216
+
217
+ def forward(self, x):
218
+ branch1x1 = self.branch1x1(x)
219
+
220
+ branch5x5 = self.branch5x5_1(x)
221
+ branch5x5 = self.branch5x5_2(branch5x5)
222
+
223
+ branch3x3dbl = self.branch3x3dbl_1(x)
224
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
225
+ branch3x3dbl = self.branch3x3dbl_3(branch3x3dbl)
226
+
227
+ # Patch: Tensorflow's average pool does not use the padded zeros in
228
+ # its average calculation
229
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
230
+ count_include_pad=False)
231
+ branch_pool = self.branch_pool(branch_pool)
232
+
233
+ outputs = [branch1x1, branch5x5, branch3x3dbl, branch_pool]
234
+ return torch.cat(outputs, 1)
235
+
236
+
237
+ class FIDInceptionC(torchvision.models.inception.InceptionC):
238
+ """InceptionC block patched for FID computation"""
239
+
240
+ def __init__(self, in_channels, channels_7x7):
241
+ super(FIDInceptionC, self).__init__(in_channels, channels_7x7)
242
+
243
+ def forward(self, x):
244
+ branch1x1 = self.branch1x1(x)
245
+
246
+ branch7x7 = self.branch7x7_1(x)
247
+ branch7x7 = self.branch7x7_2(branch7x7)
248
+ branch7x7 = self.branch7x7_3(branch7x7)
249
+
250
+ branch7x7dbl = self.branch7x7dbl_1(x)
251
+ branch7x7dbl = self.branch7x7dbl_2(branch7x7dbl)
252
+ branch7x7dbl = self.branch7x7dbl_3(branch7x7dbl)
253
+ branch7x7dbl = self.branch7x7dbl_4(branch7x7dbl)
254
+ branch7x7dbl = self.branch7x7dbl_5(branch7x7dbl)
255
+
256
+ # Patch: Tensorflow's average pool does not use the padded zeros in
257
+ # its average calculation
258
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
259
+ count_include_pad=False)
260
+ branch_pool = self.branch_pool(branch_pool)
261
+
262
+ outputs = [branch1x1, branch7x7, branch7x7dbl, branch_pool]
263
+ return torch.cat(outputs, 1)
264
+
265
+
266
+ class FIDInceptionE_1(torchvision.models.inception.InceptionE):
267
+ """First InceptionE block patched for FID computation"""
268
+
269
+ def __init__(self, in_channels):
270
+ super(FIDInceptionE_1, self).__init__(in_channels)
271
+
272
+ def forward(self, x):
273
+ branch1x1 = self.branch1x1(x)
274
+
275
+ branch3x3 = self.branch3x3_1(x)
276
+ branch3x3 = [
277
+ self.branch3x3_2a(branch3x3),
278
+ self.branch3x3_2b(branch3x3),
279
+ ]
280
+ branch3x3 = torch.cat(branch3x3, 1)
281
+
282
+ branch3x3dbl = self.branch3x3dbl_1(x)
283
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
284
+ branch3x3dbl = [
285
+ self.branch3x3dbl_3a(branch3x3dbl),
286
+ self.branch3x3dbl_3b(branch3x3dbl),
287
+ ]
288
+ branch3x3dbl = torch.cat(branch3x3dbl, 1)
289
+
290
+ # Patch: Tensorflow's average pool does not use the padded zeros in
291
+ # its average calculation
292
+ branch_pool = F.avg_pool2d(x, kernel_size=3, stride=1, padding=1,
293
+ count_include_pad=False)
294
+ branch_pool = self.branch_pool(branch_pool)
295
+
296
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
297
+ return torch.cat(outputs, 1)
298
+
299
+
300
+ class FIDInceptionE_2(torchvision.models.inception.InceptionE):
301
+ """Second InceptionE block patched for FID computation"""
302
+
303
+ def __init__(self, in_channels):
304
+ super(FIDInceptionE_2, self).__init__(in_channels)
305
+
306
+ def forward(self, x):
307
+ branch1x1 = self.branch1x1(x)
308
+
309
+ branch3x3 = self.branch3x3_1(x)
310
+ branch3x3 = [
311
+ self.branch3x3_2a(branch3x3),
312
+ self.branch3x3_2b(branch3x3),
313
+ ]
314
+ branch3x3 = torch.cat(branch3x3, 1)
315
+
316
+ branch3x3dbl = self.branch3x3dbl_1(x)
317
+ branch3x3dbl = self.branch3x3dbl_2(branch3x3dbl)
318
+ branch3x3dbl = [
319
+ self.branch3x3dbl_3a(branch3x3dbl),
320
+ self.branch3x3dbl_3b(branch3x3dbl),
321
+ ]
322
+ branch3x3dbl = torch.cat(branch3x3dbl, 1)
323
+
324
+ # Patch: The FID Inception model uses max pooling instead of average
325
+ # pooling. This is likely an error in this specific Inception
326
+ # implementation, as other Inception models use average pooling here
327
+ # (which matches the description in the paper).
328
+ branch_pool = F.max_pool2d(x, kernel_size=3, stride=1, padding=1)
329
+ branch_pool = self.branch_pool(branch_pool)
330
+
331
+ outputs = [branch1x1, branch3x3, branch3x3dbl, branch_pool]
332
+ return torch.cat(outputs, 1)
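A short sketch of how this patched Inception backbone can be used on its own, assuming the FID Inception checkpoint download succeeds on first use. The random batch is a placeholder; note that this copy raises if resize_input=True, so inputs must already be 299x299 in [0, 1]:

    import torch
    from causvid.evaluation.coco_eval.cleanfid.inception_pytorch import InceptionV3

    # Request the final average-pooling block, which yields the 2048-d FID features.
    block_idx = InceptionV3.BLOCK_INDEX_BY_DIM[2048]
    model = InceptionV3(output_blocks=(block_idx,), resize_input=False).eval()

    x = torch.rand(2, 3, 299, 299)            # placeholder batch, values in [0, 1]
    with torch.no_grad():
        feats = model(x)[0]                   # shape (2, 2048, 1, 1)
    feats = feats.squeeze(-1).squeeze(-1)     # shape (2, 2048)
    print(feats.shape)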
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/inception_torchscript.py ADDED
@@ -0,0 +1,57 @@
1
+ import os
2
+ import torch
3
+ import torch.nn as nn
4
+ from causvid.evaluation.coco_eval.cleanfid.downloads_helper import *
5
+ import contextlib
6
+
7
+
8
+ @contextlib.contextmanager
9
+ def disable_gpu_fuser_on_pt19():
10
+ # On PyTorch 1.9, a CUDA fuser bug prevents the Inception JIT model from running. See
11
+ # https://github.com/GaParmar/clean-fid/issues/5
12
+ # https://github.com/pytorch/pytorch/issues/64062
13
+ if torch.__version__.startswith('1.9.'):
14
+ old_val = torch._C._jit_can_fuse_on_gpu()
15
+ torch._C._jit_override_can_fuse_on_gpu(False)
16
+ yield
17
+ if torch.__version__.startswith('1.9.'):
18
+ torch._C._jit_override_can_fuse_on_gpu(old_val)
19
+
20
+
21
+ class InceptionV3W(nn.Module):
22
+ """
23
+ Wrapper around Inception V3 torchscript model provided here
24
+ https://nvlabs-fi-cdn.nvidia.com/stylegan2-ada-pytorch/pretrained/metrics/inception-2015-12-05.pt
25
+
26
+ path: locally saved inception weights
27
+ """
28
+
29
+ def __init__(self, path, download=True, resize_inside=False):
30
+ super(InceptionV3W, self).__init__()
31
+ # download the network if it is not present at the given directory
32
+ # use the current directory by default
33
+ if download:
34
+ check_download_inception(fpath=path)
35
+ path = os.path.join(path, "inception-2015-12-05.pt")
36
+ self.base = torch.jit.load(path).eval()
37
+ self.layers = self.base.layers
38
+ self.resize_inside = resize_inside
39
+
40
+ """
41
+ Get the inception features without resizing
42
+ x: Image with values in range [0,255]
43
+ """
44
+
45
+ def forward(self, x):
46
+ with disable_gpu_fuser_on_pt19():
47
+ bs = x.shape[0]
48
+ if self.resize_inside:
49
+ features = self.base(x, return_features=True).view((bs, 2048))
50
+ else:
51
+ # make sure it is resized already
52
+ assert (x.shape[2] == 299) and (x.shape[3] == 299)
53
+ # apply normalization
54
+ x1 = x - 128
55
+ x2 = x1 / 128
56
+ features = self.layers.forward(x2, ).view((bs, 2048))
57
+ return features
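A usage sketch for the torchscript wrapper, assuming a CUDA device is available and that "/tmp/cleanfid" is a placeholder cache directory (the weights are downloaded there on first use):

    import torch
    from causvid.evaluation.coco_eval.cleanfid.inception_torchscript import InceptionV3W

    model = InceptionV3W(path="/tmp/cleanfid", download=True, resize_inside=False).cuda()

    # With resize_inside=False the wrapper expects 299x299 inputs in [0, 255].
    x = torch.rand(4, 3, 299, 299, device="cuda") * 255
    with torch.no_grad():
        feats = model(x)    # shape (4, 2048)
    print(feats.shape)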
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/leaderboard.py ADDED
@@ -0,0 +1,43 @@
1
+ import os
2
+ import csv
3
+ import shutil
4
+ import urllib.request
5
+
6
+
7
+ def get_score(model_name=None, dataset_name=None,
8
+ dataset_res=None, dataset_split=None, task_name=None):
9
+ # download the csv file from server
10
+ url = "https://www.cs.cmu.edu/~clean-fid/files/leaderboard.csv"
11
+ local_path = "/tmp/leaderboard.csv"
12
+ with urllib.request.urlopen(url) as response, open(local_path, 'wb') as f:
13
+ shutil.copyfileobj(response, f)
14
+
15
+ d_field2idx = {}
16
+ l_matches = []
17
+ with open(local_path, 'r') as f:
18
+ csvreader = csv.reader(f)
19
+ l_fields = next(csvreader)
20
+ for idx, val in enumerate(l_fields):
21
+ d_field2idx[val.strip()] = idx
22
+ # iterate through all rows
23
+ for row in csvreader:
24
+ # skip empty rows
25
+ if len(row) == 0:
26
+ continue
27
+ # skip if the filter doesn't match
28
+ if model_name is not None and (row[d_field2idx["model_name"]].strip() != model_name):
29
+ continue
30
+ if dataset_name is not None and (row[d_field2idx["dataset_name"]].strip() != dataset_name):
31
+ continue
32
+ if dataset_res is not None and (row[d_field2idx["dataset_res"]].strip() != dataset_res):
33
+ continue
34
+ if dataset_split is not None and (row[d_field2idx["dataset_split"]].strip() != dataset_split):
35
+ continue
36
+ if task_name is not None and (row[d_field2idx["task_name"]].strip() != task_name):
37
+ continue
38
+ curr = {}
39
+ for f in l_fields:
40
+ curr[f.strip()] = row[d_field2idx[f.strip()]].strip()
41
+ l_matches.append(curr)
42
+ os.remove(local_path)
43
+ return l_matches
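A small sketch of querying the hosted leaderboard, assuming network access; the filter values below are placeholders and any of them may be omitted:

    from causvid.evaluation.coco_eval.cleanfid.leaderboard import get_score

    # Returns a list of dicts, one per leaderboard row matching the filters.
    rows = get_score(dataset_name="FFHQ", dataset_split="trainval70k")
    for row in rows:
        print(row)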
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/resize.py ADDED
@@ -0,0 +1,133 @@
1
+ """
2
+ Helpers for resizing with multiple CPU cores
3
+ """
4
+ import os
5
+ import numpy as np
6
+ import torch
7
+ from PIL import Image
8
+ import torch.nn.functional as F
9
+
10
+
11
+ def build_resizer(mode):
12
+ if mode == "clean":
13
+ return make_resizer("PIL", False, "bicubic", (299, 299))
14
+ # if using legacy tensorflow, do not manually resize outside the network
15
+ elif mode == "legacy_tensorflow":
16
+ return lambda x: x
17
+ elif mode == "legacy_pytorch":
18
+ return make_resizer("PyTorch", False, "bilinear", (299, 299))
19
+ else:
20
+ raise ValueError(f"Invalid mode {mode} specified")
21
+
22
+
23
+ """
24
+ Construct a function that resizes a numpy image based on the
25
+ flags passed in.
26
+ """
27
+
28
+
29
+ def make_resizer(library, quantize_after, filter, output_size):
30
+ if library == "PIL" and quantize_after:
31
+ name_to_filter = {
32
+ "bicubic": Image.BICUBIC,
33
+ "bilinear": Image.BILINEAR,
34
+ "nearest": Image.NEAREST,
35
+ "lanczos": Image.LANCZOS,
36
+ "box": Image.BOX
37
+ }
38
+
39
+ def func(x):
40
+ x = Image.fromarray(x)
41
+ x = x.resize(output_size, resample=name_to_filter[filter])
42
+ x = np.asarray(x).clip(0, 255).astype(np.uint8)
43
+ return x
44
+ elif library == "PIL" and not quantize_after:
45
+ name_to_filter = {
46
+ "bicubic": Image.BICUBIC,
47
+ "bilinear": Image.BILINEAR,
48
+ "nearest": Image.NEAREST,
49
+ "lanczos": Image.LANCZOS,
50
+ "box": Image.BOX
51
+ }
52
+ s1, s2 = output_size
53
+
54
+ def resize_single_channel(x_np):
55
+ img = Image.fromarray(x_np.astype(np.float32), mode='F')
56
+ img = img.resize(output_size, resample=name_to_filter[filter])
57
+ return np.asarray(img).clip(0, 255).reshape(s2, s1, 1)
58
+
59
+ def func(x):
60
+ x = [resize_single_channel(x[:, :, idx]) for idx in range(3)]
61
+ x = np.concatenate(x, axis=2).astype(np.float32)
62
+ return x
63
+ elif library == "PyTorch":
64
+ import warnings
65
+ # ignore the numpy warnings
66
+ warnings.filterwarnings("ignore")
67
+
68
+ def func(x):
69
+ x = torch.Tensor(x.transpose((2, 0, 1)))[None, ...]
70
+ x = F.interpolate(x, size=output_size, mode=filter, align_corners=False)
71
+ x = x[0, ...].cpu().data.numpy().transpose((1, 2, 0)).clip(0, 255)
72
+ if quantize_after:
73
+ x = x.astype(np.uint8)
74
+ return x
75
+ elif library == "TensorFlow":
76
+ import warnings
77
+ # ignore the numpy warnings
78
+ warnings.filterwarnings("ignore")
79
+ import tensorflow as tf
80
+
81
+ def func(x):
82
+ x = tf.constant(x)[tf.newaxis, ...]
83
+ x = tf.image.resize(x, output_size, method=filter)
84
+ x = x[0, ...].numpy().clip(0, 255)
85
+ if quantize_after:
86
+ x = x.astype(np.uint8)
87
+ return x
88
+ elif library == "OpenCV":
89
+ import cv2
90
+ name_to_filter = {
91
+ "bilinear": cv2.INTER_LINEAR,
92
+ "bicubic": cv2.INTER_CUBIC,
93
+ "lanczos": cv2.INTER_LANCZOS4,
94
+ "nearest": cv2.INTER_NEAREST,
95
+ "area": cv2.INTER_AREA
96
+ }
97
+
98
+ def func(x):
99
+ x = cv2.resize(x, output_size, interpolation=name_to_filter[filter])
100
+ x = x.clip(0, 255)
101
+ if quantize_after:
102
+ x = x.astype(np.uint8)
103
+ return x
104
+ else:
105
+ raise NotImplementedError('library [%s] is not included' % library)
106
+ return func
107
+
108
+
109
+ class FolderResizer(torch.utils.data.Dataset):
110
+ def __init__(self, files, outpath, fn_resize, output_ext=".png"):
111
+ self.files = files
112
+ self.outpath = outpath
113
+ self.output_ext = output_ext
114
+ self.fn_resize = fn_resize
115
+
116
+ def __len__(self):
117
+ return len(self.files)
118
+
119
+ def __getitem__(self, i):
120
+ path = str(self.files[i])
121
+ img_np = np.asarray(Image.open(path))
122
+ img_resize_np = self.fn_resize(img_np)
123
+ # swap the output extension
124
+ basename = os.path.basename(path).split(".")[0] + self.output_ext
125
+ outname = os.path.join(self.outpath, basename)
126
+ if self.output_ext == ".npy":
127
+ np.save(outname, img_resize_np)
128
+ elif self.output_ext == ".png":
129
+ img_resized_pil = Image.fromarray(img_resize_np)
130
+ img_resized_pil.save(outname)
131
+ else:
132
+ raise ValueError("invalid output extension")
133
+ return 0
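A quick sketch of the resizing helper on its own, using a random placeholder image; "clean" mode resizes with PIL bicubic to 299x299 and keeps float32 values:

    import numpy as np
    from causvid.evaluation.coco_eval.cleanfid.resize import build_resizer

    fn_resize = build_resizer("clean")
    img = (np.random.rand(512, 384, 3) * 255).astype(np.uint8)  # placeholder HxWx3 uint8 image
    resized = fn_resize(img)
    print(resized.shape, resized.dtype)   # (299, 299, 3) float32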
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/utils.py ADDED
@@ -0,0 +1,98 @@
1
+ import numpy as np
2
+ import torch
3
+ import torchvision
4
+ from PIL import Image
5
+ from causvid.evaluation.coco_eval.cleanfid.resize import build_resizer
6
+ import zipfile
7
+
8
+
9
+ class ResizeDataset(torch.utils.data.Dataset):
10
+ """
11
+ A placeholder Dataset that enables parallelizing the resize operation
12
+ using multiple CPU cores
13
+
14
+ files: list of all files in the folder
15
+ fn_resize: function that takes an np_array as input [0,255]
16
+ """
17
+
18
+ def __init__(self, files, mode, size=(299, 299), fdir=None):
19
+ self.files = files
20
+ self.fdir = fdir
21
+ self.transforms = torchvision.transforms.ToTensor()
22
+ self.size = size
23
+ self.fn_resize = build_resizer(mode)
24
+ self.custom_image_tranform = lambda x: x
25
+ self._zipfile = None
26
+
27
+ def _get_zipfile(self):
28
+ assert self.fdir is not None and '.zip' in self.fdir
29
+ if self._zipfile is None:
30
+ self._zipfile = zipfile.ZipFile(self.fdir)
31
+ return self._zipfile
32
+
33
+ def __len__(self):
34
+ return len(self.files)
35
+
36
+ def __getitem__(self, i):
37
+ path = str(self.files[i])
38
+ if self.fdir is not None and '.zip' in self.fdir:
39
+ with self._get_zipfile().open(path, 'r') as f:
40
+ img_np = np.array(Image.open(f).convert('RGB'))
41
+ elif ".npy" in path:
42
+ img_np = np.load(path)
43
+ else:
44
+ img_pil = Image.open(path).convert('RGB')
45
+ img_np = np.array(img_pil)
46
+
47
+ # apply a custom image transform before resizing the image to 299x299
48
+ img_np = self.custom_image_tranform(img_np)
49
+ # fn_resize expects a np array and returns a np array
50
+ img_resized = self.fn_resize(img_np)
51
+
52
+ # ToTensor() converts to [0,1] only if input in uint8
53
+ if img_resized.dtype == "uint8":
54
+ img_t = self.transforms(np.array(img_resized)) * 255
55
+ elif img_resized.dtype == "float32":
56
+ img_t = self.transforms(img_resized)
57
+
58
+ return img_t
59
+
60
+
61
+ EXTENSIONS = {'bmp', 'jpg', 'jpeg', 'pgm', 'png', 'ppm',
62
+ 'tif', 'tiff', 'webp', 'npy', 'JPEG', 'JPG', 'PNG'}
63
+
64
+
65
+ class ResizeArrayDataset(torch.utils.data.Dataset):
66
+ """
67
+ A placeholder Dataset that enables parallelizing the resize operation
68
+ using multiple CPU cores
69
+
70
+ files: list of all files in the folder
71
+ fn_resize: function that takes an np_array as input [0,255]
72
+ """
73
+
74
+ def __init__(self, array, mode, size=(299, 299)):
75
+ self.array = array
76
+ self.transforms = torchvision.transforms.ToTensor()
77
+ self.size = size
78
+ self.fn_resize = build_resizer(mode)
79
+ self.custom_image_tranform = lambda x: x
80
+
81
+ def __len__(self):
82
+ return len(self.array)
83
+
84
+ def __getitem__(self, i):
85
+ img_np = self.array[i]
86
+
87
+ # apply a custom image transform before resizing the image to 299x299
88
+ img_np = self.custom_image_tranform(img_np)
89
+ # fn_resize expects a np array and returns a np array
90
+ img_resized = self.fn_resize(img_np)
91
+
92
+ # ToTensor() converts to [0,1] only if input in uint8
93
+ if img_resized.dtype == "uint8":
94
+ img_t = self.transforms(np.array(img_resized)) * 255
95
+ elif img_resized.dtype == "float32":
96
+ img_t = self.transforms(img_resized)
97
+
98
+ return img_t
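A sketch of how these datasets are typically consumed, assuming "imgs/0001.png" and "imgs/0002.png" are placeholder image paths on disk:

    import torch
    from causvid.evaluation.coco_eval.cleanfid.utils import ResizeDataset

    # Resizing runs inside DataLoader workers, so multiple CPU cores can be used.
    files = ["imgs/0001.png", "imgs/0002.png"]      # placeholder paths
    dataset = ResizeDataset(files, mode="clean")
    loader = torch.utils.data.DataLoader(dataset, batch_size=2, num_workers=2)
    for batch in loader:
        print(batch.shape)   # (B, 3, 299, 299), values in [0, 255]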
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/cleanfid/wrappers.py ADDED
@@ -0,0 +1,108 @@
1
+ from PIL import Image
2
+ import numpy as np
3
+ import torch
4
+ from causvid.evaluation.coco_eval.cleanfid.features import build_feature_extractor, get_reference_statistics
5
+ from causvid.evaluation.coco_eval.cleanfid.fid import get_batch_features, fid_from_feats
6
+ from causvid.evaluation.coco_eval.cleanfid.resize import build_resizer
7
+
8
+
9
+ """
10
+ A helper class that allows adding images one batch at a time.
11
+ """
12
+
13
+
14
+ class CleanFID():
15
+ def __init__(self, mode="clean", model_name="inception_v3", device="cuda"):
16
+ self.real_features = []
17
+ self.gen_features = []
18
+ self.mode = mode
19
+ self.device = device
20
+ if model_name == "inception_v3":
21
+ self.feat_model = build_feature_extractor(mode, device)
22
+ self.fn_resize = build_resizer(mode)
23
+ elif model_name == "clip_vit_b_32":
24
+ from causvid.evaluation.coco_eval.cleanfid.clip_features import CLIP_fx, img_preprocess_clip
25
+ clip_fx = CLIP_fx("ViT-B/32")
26
+ self.feat_model = clip_fx
27
+ self.fn_resize = img_preprocess_clip
28
+
29
+ """
30
+ Function that takes an image (PIL.Image or np.array or torch.tensor)
31
+ and returns the corresponding feature embedding vector.
32
+ The image x is expected to be in range [0, 255]
33
+ """
34
+
35
+ def compute_features(self, x):
36
+ # if x is a PIL Image
37
+ if isinstance(x, Image.Image):
38
+ x_np = np.array(x)
39
+ x_np_resized = self.fn_resize(x_np)
40
+ x_t = torch.tensor(x_np_resized.transpose((2, 0, 1))).unsqueeze(0)
41
+ x_feat = get_batch_features(x_t, self.feat_model, self.device)
42
+ elif isinstance(x, np.ndarray):
43
+ x_np_resized = self.fn_resize(x)
44
+ x_t = torch.tensor(x_np_resized.transpose(
45
+ (2, 0, 1))).unsqueeze(0).to(self.device)
46
+ # normalization happens inside the self.feat_model, expected image range here is [0,255]
47
+ x_feat = get_batch_features(x_t, self.feat_model, self.device)
48
+ elif isinstance(x, torch.Tensor):
49
+ # pdb.set_trace()
50
+ # add the batch dimension if x is passed in as C,H,W
51
+ if len(x.shape) == 3:
52
+ x = x.unsqueeze(0)
53
+ b, c, h, w = x.shape
54
+ # convert back to np array and resize
55
+ l_x_np_resized = []
56
+ for _ in range(b):
57
+ x_np = x[_].cpu().numpy().transpose((1, 2, 0))
58
+ l_x_np_resized.append(self.fn_resize(x_np)[None,])
59
+ x_np_resized = np.concatenate(l_x_np_resized)
60
+ x_t = torch.tensor(x_np_resized.transpose(
61
+ (0, 3, 1, 2))).to(self.device)
62
+ # normalization happens inside the self.feat_model, expected image range here is [0,255]
63
+ x_feat = get_batch_features(x_t, self.feat_model, self.device)
64
+ else:
65
+ raise ValueError("image type could not be inferred")
66
+ return x_feat
67
+
68
+ """
69
+ Extract the features from x and add to the list of reference real images
70
+ """
71
+
72
+ def add_real_images(self, x):
73
+ x_feat = self.compute_features(x)
74
+ self.real_features.append(x_feat)
75
+
76
+ """
77
+ Extract the features from x and add to the list of generated images
78
+ """
79
+
80
+ def add_gen_images(self, x):
81
+ x_feat = self.compute_features(x)
82
+ self.gen_features.append(x_feat)
83
+
84
+ """
85
+ Compute FID between the real and generated images added so far
86
+ """
87
+
88
+ def calculate_fid(self, verbose=True):
89
+ feats1 = np.concatenate(self.real_features)
90
+ feats2 = np.concatenate(self.gen_features)
91
+ if verbose:
92
+ print(f"# real images = {feats1.shape[0]}")
93
+ print(f"# generated images = {feats2.shape[0]}")
94
+ return fid_from_feats(feats1, feats2)
95
+
96
+ """
97
+ Remove the real image features added so far
98
+ """
99
+
100
+ def reset_real_features(self):
101
+ self.real_features = []
102
+
103
+ """
104
+ Remove the generated image features added so far
105
+ """
106
+
107
+ def reset_gen_features(self):
108
+ self.gen_features = []
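A minimal sketch of the incremental wrapper, assuming a CUDA device; the random tensors stand in for real and generated batches in [0, 255], and a meaningful FID would require many more images:

    import torch
    from causvid.evaluation.coco_eval.cleanfid.wrappers import CleanFID

    metric = CleanFID(mode="clean", device="cuda")
    for _ in range(4):
        real_batch = torch.rand(8, 3, 256, 256) * 255   # placeholder reference batch
        fake_batch = torch.rand(8, 3, 256, 256) * 255   # placeholder generated batch
        metric.add_real_images(real_batch)
        metric.add_gen_images(fake_batch)
    print(metric.calculate_fid())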
exp_code/1_benchmark/CausVid/causvid/evaluation/coco_eval/coco_evaluator.py ADDED
@@ -0,0 +1,246 @@
1
+ # Part of this code is modified from GigaGAN: https://github.com/mingukkang/GigaGAN
2
+ # The MIT License (MIT)
3
+ from causvid.evaluation.coco_eval.cleanfid import fid
4
+ from torchvision.transforms import InterpolationMode
5
+ import torchvision.transforms as transforms
6
+ from torch.utils.data import DataLoader
7
+ from torch.utils.data import Dataset
8
+ from PIL import Image
9
+ import numpy as np
10
+ import shutil
11
+ import torch
12
+ import time
13
+ import os
14
+
15
+ resizer_collection = {"nearest": InterpolationMode.NEAREST,
16
+ "box": InterpolationMode.BOX,
17
+ "bilinear": InterpolationMode.BILINEAR,
18
+ "hamming": InterpolationMode.HAMMING,
19
+ "bicubic": InterpolationMode.BICUBIC,
20
+ "lanczos": InterpolationMode.LANCZOS}
21
+
22
+
23
+ class CenterCropLongEdge(object):
24
+ """
25
+ this code is borrowed from https://github.com/ajbrock/BigGAN-PyTorch
26
+ MIT License
27
+ Copyright (c) 2019 Andy Brock
28
+ """
29
+
30
+ def __call__(self, img):
31
+ return transforms.functional.center_crop(img, min(img.size))
32
+
33
+ def __repr__(self):
34
+ return self.__class__.__name__
35
+
36
+
37
+ @torch.no_grad()
38
+ def compute_fid(fake_arr, gt_dir, device,
39
+ resize_size=None, feature_extractor="inception",
40
+ patch_fid=False):
41
+ center_crop_trsf = CenterCropLongEdge()
42
+
43
+ def resize_and_center_crop(image_np):
44
+ image_pil = Image.fromarray(image_np)
45
+ if patch_fid:
46
+ # if image_pil.size[0] != 1024 and image_pil.size[1] != 1024:
47
+ # image_pil = image_pil.resize([1024, 1024])
48
+
49
+ # directly crop to the 299 x 299 patch expected by the inception network
50
+ if image_pil.size[0] >= 299 and image_pil.size[1] >= 299:
51
+ image_pil = transforms.functional.center_crop(image_pil, 299)
52
+ # else:
53
+ # raise ValueError("Image is too small to crop to 299 x 299")
54
+ else:
55
+ image_pil = center_crop_trsf(image_pil)
56
+
57
+ if resize_size is not None:
58
+ image_pil = image_pil.resize((resize_size, resize_size),
59
+ Image.LANCZOS)
60
+ return np.array(image_pil)
61
+
62
+ if feature_extractor == "inception":
63
+ model_name = "inception_v3"
64
+ elif feature_extractor == "clip":
65
+ model_name = "clip_vit_b_32"
66
+ else:
67
+ raise ValueError(
68
+ "Unrecognized feature extractor [%s]" % feature_extractor)
69
+ # fid, fake_feats, real_feats = fid.compute_fid(
70
+ stat = fid.compute_fid(
71
+ None,
72
+ gt_dir,
73
+ model_name=model_name,
74
+ custom_image_tranform=resize_and_center_crop,
75
+ use_dataparallel=False,
76
+ device=device,
77
+ pred_arr=fake_arr
78
+ )
79
+ # return fid, fake_feats, real_feats
80
+ return stat
81
+
82
+
83
+ def evaluate_model(args, device, all_images, patch_fid=False):
84
+ fid = compute_fid(
85
+ fake_arr=all_images,
86
+ gt_dir=args.ref_dir,
87
+ device=device,
88
+ resize_size=args.eval_res,
89
+ feature_extractor="inception",
90
+ patch_fid=patch_fid
91
+ )
92
+
93
+ return fid
94
+
95
+
96
+ def tensor2pil(image: torch.Tensor):
97
+ ''' output image : tensor to PIL
98
+ '''
99
+ if isinstance(image, list) or image.ndim == 4:
100
+ return [tensor2pil(im) for im in image]
101
+
102
+ assert image.ndim == 3
103
+ output_image = Image.fromarray(((image + 1.0) * 127.5).clamp(
104
+ 0.0, 255.0).to(torch.uint8).permute(1, 2, 0).detach().cpu().numpy())
105
+ return output_image
106
+
107
+
108
+ class CLIPScoreDataset(Dataset):
109
+ def __init__(self, images, captions, transform, preprocessor) -> None:
110
+ super().__init__()
111
+ self.images = images
112
+ self.captions = captions
113
+ self.transform = transform
114
+ self.preprocessor = preprocessor
115
+
116
+ def __len__(self):
117
+ return len(self.images)
118
+
119
+ def __getitem__(self, index):
120
+ image = self.images[index]
121
+ image_pil = self.transform(image)
122
+ image_pil = self.preprocessor(image_pil)
123
+ caption = self.captions[index]
124
+ return image_pil, caption
125
+
126
+
127
+ @torch.no_grad()
128
+ def compute_clip_score(
129
+ images, captions, clip_model="ViT-B/32", device="cuda", how_many=30000):
130
+ print("Computing CLIP score")
131
+ import clip as openai_clip
132
+ if clip_model == "ViT-B/32":
133
+ clip, clip_preprocessor = openai_clip.load("ViT-B/32", device=device)
134
+ clip = clip.eval()
135
+ elif clip_model == "ViT-G/14":
136
+ import open_clip
137
+ clip, _, clip_preprocessor = open_clip.create_model_and_transforms(
138
+ "ViT-g-14", pretrained="laion2b_s12b_b42k")
139
+ clip = clip.to(device)
140
+ clip = clip.eval()
141
+ clip = clip.float()
142
+ else:
143
+ raise NotImplementedError
144
+
145
+ def resize_and_center_crop(image_np, resize_size=256):
146
+ image_pil = Image.fromarray(image_np)
147
+ image_pil = CenterCropLongEdge()(image_pil)
148
+
149
+ if resize_size is not None:
150
+ image_pil = image_pil.resize((resize_size, resize_size),
151
+ Image.LANCZOS)
152
+ return image_pil
153
+
154
+ def simple_collate(batch):
155
+ images, captions = [], []
156
+ for img, cap in batch:
157
+ images.append(img)
158
+ captions.append(cap)
159
+ return images, captions
160
+
161
+ dataset = CLIPScoreDataset(
162
+ images, captions, transform=resize_and_center_crop,
163
+ preprocessor=clip_preprocessor
164
+ )
165
+ dataloader = DataLoader(
166
+ dataset, batch_size=64,
167
+ shuffle=False, num_workers=8,
168
+ collate_fn=simple_collate
169
+
170
+ )
171
+
172
+ cos_sims = []
173
+ count = 0
174
+ # for imgs, txts in zip(images, captions):
175
+ for index, (imgs_pil, txts) in enumerate(dataloader):
176
+ # imgs_pil = [resize_and_center_crop(imgs)]
177
+ # txts = [txts]
178
+ # imgs_pil = [clip_preprocessor(img) for img in imgs]
179
+ imgs = torch.stack(imgs_pil, dim=0).to(device)
180
+ tokens = openai_clip.tokenize(txts, truncate=True).to(device)
181
+ # Prepending text prompts with "A photo depicts "
182
+ # https://arxiv.org/abs/2104.08718
183
+ prepend_text = "A photo depicts "
184
+ prepend_text_token = openai_clip.tokenize(prepend_text)[
185
+ :, 1:4].to(device)
186
+ prepend_text_tokens = prepend_text_token.expand(tokens.shape[0], -1)
187
+
188
+ start_tokens = tokens[:, :1]
189
+ new_text_tokens = torch.cat(
190
+ [start_tokens, prepend_text_tokens, tokens[:, 1:]], dim=1)[:, :77]
191
+ last_cols = new_text_tokens[:, 77 - 1:77]
192
+ last_cols[last_cols > 0] = 49407 # eot token
193
+ new_text_tokens = torch.cat(
194
+ [new_text_tokens[:, :76], last_cols], dim=1)
195
+
196
+ img_embs = clip.encode_image(imgs)
197
+ text_embs = clip.encode_text(new_text_tokens)
198
+
199
+ similarities = torch.nn.functional.cosine_similarity(
200
+ img_embs, text_embs, dim=1)
201
+ cos_sims.append(similarities)
202
+ count += similarities.shape[0]
203
+ if count >= how_many:
204
+ break
205
+
206
+ clip_score = torch.cat(cos_sims, dim=0)[:how_many].mean()
207
+ clip_score = clip_score.detach().cpu().numpy()
208
+ return clip_score
209
+
210
+
211
+ @torch.no_grad()
212
+ def compute_image_reward(
213
+ images, captions, device
214
+ ):
215
+ import ImageReward as RM
216
+ from tqdm import tqdm
217
+ model = RM.load("ImageReward-v1.0", device=device)
218
+ rewards = []
219
+ for image, prompt in tqdm(zip(images, captions)):
220
+ reward = model.score(prompt, Image.fromarray(image))
221
+ rewards.append(reward)
222
+ return np.mean(np.array(rewards))
223
+
224
+
225
+ @torch.no_grad()
226
+ def compute_diversity_score(
227
+ lpips_loss_func, images, device
228
+ ):
229
+ # resize all image to 512 and convert to tensor
230
+ images = [Image.fromarray(image) for image in images]
231
+ images = [image.resize((512, 512), Image.LANCZOS) for image in images]
232
+ images = np.stack([np.array(image) for image in images], axis=0)
233
+ images = torch.tensor(images).to(device).float() / 255.0
234
+ images = images.permute(0, 3, 1, 2)
235
+
236
+ num_images = images.shape[0]
237
+ loss_list = []
238
+
239
+ for i in range(num_images):
240
+ for j in range(i + 1, num_images):
241
+ image1 = images[i].unsqueeze(0)
242
+ image2 = images[j].unsqueeze(0)
243
+ loss = lpips_loss_func(image1, image2)
244
+
245
+ loss_list.append(loss.item())
246
+ return np.mean(loss_list)
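A usage sketch for the CLIP-score helper, assuming the OpenAI CLIP package is installed (as noted in the eval scripts below) and a CUDA device is available; the images and captions are placeholders:

    import numpy as np
    from causvid.evaluation.coco_eval.coco_evaluator import compute_clip_score

    images = [(np.random.rand(512, 512, 3) * 255).astype(np.uint8) for _ in range(4)]
    captions = ["a cat on a sofa", "a red car", "a bowl of fruit", "a mountain lake"]

    # Mean cosine similarity between image and text embeddings, with the
    # "A photo depicts " prefix prepended to each caption.
    score = compute_clip_score(images, captions, clip_model="ViT-B/32",
                               device="cuda", how_many=len(images))
    print(score)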
exp_code/1_benchmark/CausVid/causvid/evaluation/eval_sdxl_coco.py ADDED
@@ -0,0 +1,135 @@
1
+ # pip install git+https://github.com/openai/CLIP.git
2
+ # pip install open_clip_torch
3
+ from causvid.evaluation.coco_eval.coco_evaluator import evaluate_model, compute_clip_score
4
+ from diffusers import DiffusionPipeline, LCMScheduler, DDIMScheduler
5
+ from causvid.util import launch_distributed_job
6
+ import torch.distributed as dist
7
+ from tqdm import tqdm
8
+ import numpy as np
9
+ import argparse
10
+ import torch
11
+ import time
12
+ import os
13
+
14
+
15
+ def load_generator(checkpoint_path, generator):
16
+ # sometime the state_dict is not fully saved yet
17
+ counter = 0
18
+ while True:
19
+ try:
20
+ state_dict = torch.load(checkpoint_path, map_location="cpu")[
21
+ 'generator']
22
+ break
23
+ except:
24
+ print(f"fail to load checkpoint {checkpoint_path}")
25
+ time.sleep(1)
26
+
27
+ counter += 1
28
+
29
+ if counter > 100:
30
+ return None
31
+
32
+ state_dict = {k.replace("model.", ""): v for k, v in state_dict.items()}
33
+ print(generator.load_state_dict(state_dict, strict=True))
34
+ return generator
35
+
36
+
37
+ def sample(pipeline, prompt_list, denoising_step_list, batch_size):
38
+ num_prompts = len(prompt_list)
39
+ num_steps = len(denoising_step_list)
40
+
41
+ images = []
42
+ all_prompts = []
43
+ for i in tqdm(range(0, num_prompts, batch_size)):
44
+ batch_prompt = prompt_list[i:i + batch_size]
45
+ timesteps = None if isinstance(
46
+ pipeline.scheduler, DDIMScheduler) else denoising_step_list
47
+ batch_images = pipeline(prompt=batch_prompt, num_inference_steps=num_steps, timesteps=timesteps,
48
+ guidance_scale=0, output_type='np').images
49
+ batch_images = (batch_images * 255.0).astype("uint8")
50
+ images.extend(batch_images)
51
+ all_prompts.extend(batch_prompt)
52
+
53
+ torch.cuda.empty_cache()
54
+
55
+ all_images = np.stack(images, axis=0)
56
+
57
+ data_dict = {"all_images": all_images, "all_captions": all_prompts}
58
+
59
+ return data_dict
60
+
61
+
62
+ @torch.no_grad()
63
+ def main():
64
+ parser = argparse.ArgumentParser()
65
+ parser.add_argument("--denoising_step_list", type=int,
66
+ nargs="+", required=True)
67
+ parser.add_argument("--batch_size", type=int, default=16)
68
+ parser.add_argument("--prompt_path", type=str, required=True)
69
+ parser.add_argument("--checkpoint_path", type=str, required=True)
70
+ parser.add_argument("--local_rank", type=int, default=-1)
71
+ parser.add_argument("--ref_dir", type=str, required=True)
72
+ parser.add_argument("--eval_res", type=int, default=256)
73
+ parser.add_argument("--scheduler", type=str,
74
+ choices=['ddim', 'lcm'], default='lcm')
75
+
76
+ args = parser.parse_args()
77
+
78
+ # Step 1: Setup the environment
79
+ torch.backends.cuda.matmul.allow_tf32 = True
80
+ torch.backends.cudnn.allow_tf32 = True
81
+ torch.set_grad_enabled(False)
82
+
83
+ # Step 2: Create the generator
84
+ launch_distributed_job()
85
+ device = torch.cuda.current_device()
86
+
87
+ pipeline = DiffusionPipeline.from_pretrained(
88
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32).to(device)
89
+ if args.scheduler == "ddim":
90
+ pipeline.scheduler = DDIMScheduler.from_config(
91
+ pipeline.scheduler.config, timestep_spacing="trailing")
92
+ elif args.scheduler == "lcm":
93
+ pipeline.scheduler = LCMScheduler.from_config(
94
+ pipeline.scheduler.config)
95
+ pipeline.set_progress_bar_config(disable=True)
96
+ pipeline.safety_checker = None
97
+
98
+ # Step 3: Generate images
99
+ prompt_list = []
100
+ with open(args.prompt_path, "r") as f:
101
+ for line in f:
102
+ prompt_list.append(line.strip())
103
+
104
+ generator = load_generator(os.path.join(
105
+ args.checkpoint_path, "model.pt"), pipeline.unet)
106
+
107
+ if generator is None:
108
+ return
109
+
110
+ pipeline.unet = generator
111
+ data_dict = sample(pipeline, prompt_list,
112
+ args.denoising_step_list, args.batch_size)
113
+
114
+ # Step 4: Evaluate the generated images
115
+
116
+ # evaluate and write stats to file
117
+ if dist.get_rank() == 0:
118
+ fid = evaluate_model(
119
+ args, device, data_dict["all_images"], patch_fid=False)
120
+
121
+ clip_score = compute_clip_score(
122
+ images=data_dict["all_images"],
123
+ captions=data_dict["all_captions"],
124
+ clip_model="ViT-G/14",
125
+ device=device,
126
+ how_many=len(data_dict["all_images"])
127
+ )
128
+ print(f"fid {fid} clip score {clip_score}")
129
+
130
+ with open(os.path.join(args.checkpoint_path, "eval_stats.txt"), "w") as f:
131
+ f.write(f"fid {fid} clip score {clip_score}")
132
+
133
+
134
+ if __name__ == "__main__":
135
+ main()
exp_code/1_benchmark/CausVid/causvid/evaluation/inference_sdxl.py ADDED
@@ -0,0 +1,146 @@
1
+ # pip install git+https://github.com/openai/CLIP.git
2
+ # pip install open_clip_torch
3
+ from diffusers import StableDiffusionXLPipeline, LCMScheduler, DDIMScheduler
4
+ from causvid.util import launch_distributed_job
5
+ from PIL import Image
6
+ from tqdm import tqdm
7
+ import numpy as np
8
+ import argparse
9
+ import torch
10
+ import time
11
+ import os
12
+ import re
13
+
14
+
15
+ def load_generator(checkpoint_path, generator):
16
+ # sometimes the state_dict is not fully saved yet
17
+ counter = 0
18
+ while True:
19
+ try:
20
+ state_dict = torch.load(checkpoint_path, map_location="cpu")[
21
+ 'generator']
22
+ break
23
+ except:
24
+ print(f"fail to load checkpoint {checkpoint_path}")
25
+ time.sleep(1)
26
+
27
+ counter += 1
28
+
29
+ if counter > 100:
30
+ return None
31
+
32
+ state_dict = {k.replace("model.", ""): v for k, v in state_dict.items()}
33
+ print(generator.load_state_dict(state_dict, strict=True))
34
+ return generator
35
+
36
+
37
+ def sample(pipeline, prompt_list, denoising_step_list, batch_size):
38
+ num_prompts = len(prompt_list)
39
+ num_steps = len(denoising_step_list)
40
+
41
+ images = []
42
+ all_prompts = []
43
+ for i in tqdm(range(0, num_prompts, batch_size)):
44
+ batch_prompt = prompt_list[i:i + batch_size]
45
+ timesteps = None if isinstance(
46
+ pipeline.scheduler, DDIMScheduler) else denoising_step_list
47
+ batch_images = pipeline(prompt=batch_prompt, num_inference_steps=num_steps, timesteps=timesteps,
48
+ guidance_scale=0, output_type='np').images
49
+ batch_images = (batch_images * 255.0).astype("uint8")
50
+ images.extend(batch_images)
51
+ all_prompts.extend(batch_prompt)
52
+
53
+ torch.cuda.empty_cache()
54
+
55
+ all_images = np.stack(images, axis=0)
56
+
57
+ data_dict = {"all_images": all_images, "all_captions": all_prompts}
58
+
59
+ return data_dict
60
+
61
+
62
+ @torch.no_grad()
63
+ def main():
64
+ parser = argparse.ArgumentParser()
65
+ parser.add_argument("--denoising_step_list", type=int,
66
+ nargs="+", required=True)
67
+ parser.add_argument("--batch_size", type=int, default=16)
68
+ parser.add_argument("--prompt_path", type=str, required=True)
69
+ parser.add_argument("--checkpoint_path", type=str, required=True)
70
+ parser.add_argument("--output_dir", type=str, default="./output")
71
+ parser.add_argument("--local_rank", type=int, default=-1)
72
+ parser.add_argument("--scheduler", type=str, choices=['ddim', 'lcm'], default='lcm')
73
+
74
+ args = parser.parse_args()
75
+
76
+ # Step 1: Setup the environment
77
+ torch.backends.cuda.matmul.allow_tf32 = True
78
+ torch.backends.cudnn.allow_tf32 = True
79
+ torch.set_grad_enabled(False)
80
+
81
+ # Step 2: Create the generator
82
+ launch_distributed_job()
83
+ device = torch.cuda.current_device()
84
+
85
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
86
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float32).to(device)
87
+ if args.scheduler == "ddim":
88
+ pipeline.scheduler = DDIMScheduler.from_config(
89
+ pipeline.scheduler.config, timestep_spacing="trailing")
90
+ elif args.scheduler == "lcm":
91
+ pipeline.scheduler = LCMScheduler.from_config(pipeline.scheduler.config)
92
+
93
+ pipeline.set_progress_bar_config(disable=True)
94
+ pipeline.safety_checker = None
95
+
96
+ # Step 3: Generate images
97
+ prompt_list = []
98
+ with open(args.prompt_path, "r") as f:
99
+ for line in f:
100
+ prompt_list.append(line.strip())
101
+
102
+ generator = load_generator(os.path.join(
103
+ args.checkpoint_path, "model.pt"), pipeline.unet)
104
+
105
+ if generator is None:
106
+ return
107
+
108
+ pipeline.unet = generator
109
+ data_dict = sample(pipeline, prompt_list,
110
+ args.denoising_step_list, args.batch_size)
111
+
112
+ os.makedirs(args.output_dir, exist_ok=True)
113
+
114
+ def sanitize_filename(name):
115
+ """
116
+ Remove any characters that are not alphanumeric, spaces, underscores, or hyphens.
117
+ Then replace spaces with underscores.
118
+ """
119
+ # Remove unwanted characters (anything not a word character, space, or hyphen)
120
+ name = re.sub(r'[^\w\s-]', '', name)
121
+ # Replace spaces with underscores and strip leading/trailing whitespace
122
+ return name.strip().replace(' ', '_')
123
+
124
+ for idx, (img_array, prompt) in enumerate(zip(data_dict['all_images'], data_dict['all_captions'])):
125
+ # Split the prompt into words and take the first ten words.
126
+ words = prompt.split()
127
+ if len(words) >= 10:
128
+ base_name = ' '.join(words[:10])
129
+ else:
130
+ base_name = ' '.join(words)
131
+
132
+ # Sanitize the base file name to remove problematic characters.
133
+ base_name = sanitize_filename(base_name)
134
+
135
+ # Append the index to ensure uniqueness (in case two prompts share the same first ten words).
136
+ file_name = f"{base_name}_{idx}.jpg"
137
+
138
+ # Create a PIL Image from the numpy array.
139
+ image = Image.fromarray(img_array)
140
+
141
+ # Save the image in the specified folder.
142
+ image.save(os.path.join(args.output_dir, file_name))
143
+
144
+
145
+ if __name__ == "__main__":
146
+ main()
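The generation loop above batches prompts, samples with guidance_scale=0 using the configured timestep list, and derives each output file name from the first ten words of the prompt. A minimal, self-contained sketch of just the batching and file-naming logic (the diffusers call is replaced by a print, so this runs without any model weights):

import re

def sanitize_filename(name: str) -> str:
    # Keep only word characters, spaces, and hyphens, then replace spaces with underscores.
    name = re.sub(r'[^\w\s-]', '', name)
    return name.strip().replace(' ', '_')

prompt_list = ["A photo of a cat sitting on a red sofa, 4k!", "a dog running on the beach"]
batch_size = 16
for i in range(0, len(prompt_list), batch_size):
    batch_prompt = prompt_list[i:i + batch_size]
    for idx, prompt in enumerate(batch_prompt, start=i):
        base_name = sanitize_filename(' '.join(prompt.split()[:10]))
        print(f"{base_name}_{idx}.jpg")   # e.g. A_photo_of_a_cat_sitting_on_a_red_sofa_0.jpg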
exp_code/1_benchmark/CausVid/causvid/evaluation/parallel_sdxl_eval.sh ADDED
@@ -0,0 +1,54 @@
1
+ #!/usr/bin/env bash
2
+
3
+ # ----------------------
4
+ # User-defined variables
5
+ # ----------------------
6
+ CHECKPOINT_DIR="/mnt/localssd/sdxl_logs/2025-01-23-15-16-31.765725_seed228885"
7
+ PROMPT_PATH="captions_coco10k.txt"
8
+ REF_DIR="/mnt/localssd/coco10k/subset/"
9
+ DENOISING_STEPS="999 749 499 249"
10
+
11
+ # Adjust this if you have a different number of GPUs available
12
+ NUM_GPUS=8
13
+
14
+ # -------------
15
+ # Main script
16
+ # -------------
17
+ # Grab all checkpoints in the folder
18
+ CHECKPOINTS=(${CHECKPOINT_DIR}/checkpoint_model_*)
19
+
20
+ # Print how many checkpoints were found
21
+ echo "Found ${#CHECKPOINTS[@]} checkpoints in ${CHECKPOINT_DIR}"
22
+
23
+ # Loop over each checkpoint and launch a job
24
+ for ((i=0; i<${#CHECKPOINTS[@]}; i++)); do
25
+
26
+ # GPU to use (round-robin assignment)
27
+ GPU_ID=$(( i % NUM_GPUS ))
28
+
29
+ # Pick a unique port for each process. For example, offset from 29500.
30
+ # Feel free to choose any range that won't collide with other applications.
31
+ MASTER_PORT=$((29500 + i))
32
+
33
+ echo "Launching eval for checkpoint: ${CHECKPOINTS[$i]} on GPU ${GPU_ID}, master_port ${MASTER_PORT}"
34
+
35
+ # Run eval on GPU_ID, put the process in the background
36
+ CUDA_VISIBLE_DEVICES=$GPU_ID torchrun --nproc_per_node 1 \
37
+ --master_port ${MASTER_PORT} \
38
+ causvid/evaluation/eval_sdxl_coco.py \
39
+ --denoising_step_list $DENOISING_STEPS \
40
+ --prompt_path "$PROMPT_PATH" \
41
+ --checkpoint_path "${CHECKPOINTS[$i]}" \
42
+ --ref_dir "$REF_DIR" &
43
+
44
+ # If we've launched as many parallel tasks as GPUs, wait for this batch to finish
45
+ if (( (i+1) % NUM_GPUS == 0 )); then
46
+ echo "Waiting for batch of $NUM_GPUS processes to finish..."
47
+ wait
48
+ fi
49
+ done
50
+
51
+ # If there are leftover tasks that didn't perfectly divide into NUM_GPUS, wait again
52
+ wait
53
+
54
+ echo "All evaluations have completed."
exp_code/1_benchmark/CausVid/causvid/loss.py ADDED
@@ -0,0 +1,82 @@
1
+ from abc import ABC, abstractmethod
2
+ import torch
3
+
4
+
5
+ class DenoisingLoss(ABC):
6
+ @abstractmethod
7
+ def __call__(
8
+ self, x: torch.Tensor, x_pred: torch.Tensor,
9
+ noise: torch.Tensor, noise_pred: torch.Tensor,
10
+ alphas_cumprod: torch.Tensor,
11
+ timestep: torch.Tensor,
12
+ **kwargs
13
+ ) -> torch.Tensor:
14
+ """
15
+ Base class for denoising loss.
16
+ Input:
17
+ - x: the clean data with shape [B, F, C, H, W]
18
+ - x_pred: the predicted clean data with shape [B, F, C, H, W]
19
+ - noise: the noise with shape [B, F, C, H, W]
20
+ - noise_pred: the predicted noise with shape [B, F, C, H, W]
21
+ - alphas_cumprod: the cumulative product of alphas (defining the noise schedule) with shape [T]
22
+ - timestep: the current timestep with shape [B, F]
23
+ """
24
+ pass
25
+
26
+
27
+ class X0PredLoss(DenoisingLoss):
28
+ def __call__(
29
+ self, x: torch.Tensor, x_pred: torch.Tensor,
30
+ noise: torch.Tensor, noise_pred: torch.Tensor,
31
+ alphas_cumprod: torch.Tensor,
32
+ timestep: torch.Tensor,
33
+ **kwargs
34
+ ) -> torch.Tensor:
35
+ return torch.mean((x - x_pred) ** 2)
36
+
37
+
38
+ class VPredLoss(DenoisingLoss):
39
+ def __call__(
40
+ self, x: torch.Tensor, x_pred: torch.Tensor,
41
+ noise: torch.Tensor, noise_pred: torch.Tensor,
42
+ alphas_cumprod: torch.Tensor,
43
+ timestep: torch.Tensor,
44
+ **kwargs
45
+ ) -> torch.Tensor:
46
+ weights = 1 / \
47
+ (1 - alphas_cumprod[timestep].reshape(*timestep.shape, 1, 1, 1))
48
+ return torch.mean(weights * (x - x_pred) ** 2)
49
+
50
+
51
+ class NoisePredLoss(DenoisingLoss):
52
+ def __call__(
53
+ self, x: torch.Tensor, x_pred: torch.Tensor,
54
+ noise: torch.Tensor, noise_pred: torch.Tensor,
55
+ alphas_cumprod: torch.Tensor,
56
+ timestep: torch.Tensor,
57
+ **kwargs
58
+ ) -> torch.Tensor:
59
+ return torch.mean((noise - noise_pred) ** 2)
60
+
61
+
62
+ class FlowPredLoss(DenoisingLoss):
63
+ def __call__(
64
+ self, x: torch.Tensor, x_pred: torch.Tensor,
65
+ noise: torch.Tensor, noise_pred: torch.Tensor,
66
+ alphas_cumprod: torch.Tensor,
67
+ timestep: torch.Tensor,
68
+ **kwargs
69
+ ) -> torch.Tensor:
70
+ return torch.mean((kwargs["flow_pred"] - (noise - x)) ** 2)
71
+
72
+
73
+ NAME_TO_CLASS = {
74
+ "x0": X0PredLoss,
75
+ "v": VPredLoss,
76
+ "noise": NoisePredLoss,
77
+ "flow": FlowPredLoss
78
+ }
79
+
80
+
81
+ def get_denoising_loss(loss_type: str) -> DenoisingLoss:
82
+ return NAME_TO_CLASS[loss_type]
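A hedged usage sketch for the registry above: get_denoising_loss returns a class, which is instantiated and then called with the tensors described in the DenoisingLoss docstring ([B, F, C, H, W] shapes). The linear alphas_cumprod here is a placeholder schedule, not the one used in training:

import torch
# from causvid.loss import get_denoising_loss   # when used outside this module

loss_fn = get_denoising_loss("noise")()                  # NoisePredLoss instance
B, F, C, H, W, T = 2, 4, 16, 8, 8, 1000
x, x_pred = torch.randn(B, F, C, H, W), torch.randn(B, F, C, H, W)
noise, noise_pred = torch.randn(B, F, C, H, W), torch.randn(B, F, C, H, W)
alphas_cumprod = torch.linspace(0.999, 0.001, T)         # placeholder schedule (assumption)
timestep = torch.randint(0, T, (B, F))
loss = loss_fn(x, x_pred, noise, noise_pred, alphas_cumprod, timestep)
print(loss)                                              # scalar MSE between noise and noise_pred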
exp_code/1_benchmark/CausVid/causvid/models/__init__.py ADDED
@@ -0,0 +1,56 @@
1
+ from .wan.wan_wrapper import WanTextEncoder, WanVAEWrapper, WanDiffusionWrapper, CausalWanDiffusionWrapper
2
+ from causvid.bidirectional_trajectory_pipeline import BidirectionalInferenceWrapper
3
+ from .sdxl.sdxl_wrapper import SDXLWrapper, SDXLTextEncoder, SDXLVAE
4
+ from transformers.models.t5.modeling_t5 import T5Block
5
+
6
+
7
+ DIFFUSION_NAME_TO_CLASS = {
8
+ "sdxl": SDXLWrapper,
9
+ "wan": WanDiffusionWrapper,
10
+ "causal_wan": CausalWanDiffusionWrapper
11
+ }
12
+
13
+
14
+ def get_diffusion_wrapper(model_name):
15
+ return DIFFUSION_NAME_TO_CLASS[model_name]
16
+
17
+
18
+ TEXTENCODER_NAME_TO_CLASS = {
19
+ "sdxl": SDXLTextEncoder,
20
+ "wan": WanTextEncoder,
21
+ "causal_wan": WanTextEncoder
22
+ }
23
+
24
+
25
+ def get_text_encoder_wrapper(model_name):
26
+ return TEXTENCODER_NAME_TO_CLASS[model_name]
27
+
28
+
29
+ VAE_NAME_TO_CLASS = {
30
+ "sdxl": SDXLVAE,
31
+ "wan": WanVAEWrapper,
32
+ "causal_wan": WanVAEWrapper # TODO: Change the VAE to the causal version
33
+ }
34
+
35
+
36
+ def get_vae_wrapper(model_name):
37
+ return VAE_NAME_TO_CLASS[model_name]
38
+
39
+
40
+ PIPELINE_NAME_TO_CLASS = {
41
+ "sdxl": BidirectionalInferenceWrapper,
42
+ "wan": BidirectionalInferenceWrapper
43
+ }
44
+
45
+
46
+ def get_inference_pipeline_wrapper(model_name, **kwargs):
47
+ return PIPELINE_NAME_TO_CLASS[model_name](**kwargs)
48
+
49
+
50
+ BLOCK_NAME_TO_BLOCK_CLASS = {
51
+ "T5Block": T5Block
52
+ }
53
+
54
+
55
+ def get_block_class(model_name):
56
+ return BLOCK_NAME_TO_BLOCK_CLASS[model_name]
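A small sketch of how a training or inference script would resolve a config's model name through the registries above (the keys shown are the ones defined in this file; instantiation is commented out because the wrappers typically load full checkpoints in their constructors):

model_name = "causal_wan"
generator_cls = get_diffusion_wrapper(model_name)         # CausalWanDiffusionWrapper
text_encoder_cls = get_text_encoder_wrapper(model_name)   # WanTextEncoder
vae_cls = get_vae_wrapper(model_name)                     # WanVAEWrapper
# generator, text_encoder, vae = generator_cls(), text_encoder_cls(), vae_cls()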
exp_code/1_benchmark/CausVid/causvid/models/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (1.66 kB). View file
 
exp_code/1_benchmark/CausVid/causvid/models/__pycache__/model_interface.cpython-312.pyc ADDED
Binary file (7.17 kB). View file
 
exp_code/1_benchmark/CausVid/causvid/models/model_interface.py ADDED
@@ -0,0 +1,114 @@
1
+ from causvid.scheduler import SchedulerInterface
2
+ from abc import abstractmethod, ABC
3
+ from typing import List, Optional
4
+ import torch
5
+ import types
6
+
7
+
8
+ class DiffusionModelInterface(ABC, torch.nn.Module):
9
+ scheduler: SchedulerInterface
10
+
11
+ @abstractmethod
12
+ def forward(
13
+ self, noisy_image_or_video: torch.Tensor, conditional_dict: dict,
14
+ timestep: torch.Tensor, kv_cache: Optional[List[dict]] = None,
15
+ crossattn_cache: Optional[List[dict]] = None,
16
+ current_start: Optional[int] = None,
17
+ current_end: Optional[int] = None
18
+ ) -> torch.Tensor:
19
+ """
20
+ A method to run diffusion model.
21
+ Input:
22
+ - noisy_image_or_video: a tensor with shape [B, F, C, H, W] where the number of frames is 1 for images.
23
+ - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
24
+ - timestep: a tensor with shape [B, F] where the number of frames is 1 for images.
25
+ all data should be on the same device as the model.
26
+ - kv_cache: a list of dictionaries containing the key and value tensors for each attention layer.
27
+ - current_start: the start index of the current frame in the sequence.
28
+ - current_end: the end index of the current frame in the sequence.
29
+ Output: a tensor with shape [B, F, C, H, W] where the number of frames is 1 for images.
30
+ We always expect a X0 prediction form for the output.
31
+ """
32
+ pass
33
+
34
+ def get_scheduler(self) -> SchedulerInterface:
35
+ """
36
+ Update the current scheduler with the interface's static methods.
37
+ """
38
+ scheduler = self.scheduler
39
+ scheduler.convert_x0_to_noise = types.MethodType(
40
+ SchedulerInterface.convert_x0_to_noise, scheduler)
41
+ scheduler.convert_noise_to_x0 = types.MethodType(
42
+ SchedulerInterface.convert_noise_to_x0, scheduler)
43
+ scheduler.convert_velocity_to_x0 = types.MethodType(
44
+ SchedulerInterface.convert_velocity_to_x0, scheduler)
45
+ self.scheduler = scheduler
46
+ return scheduler
47
+
48
+ def post_init(self):
49
+ """
50
+ A few custom initialization steps that should be called after the object is created.
51
+ Currently, the only one we have is to bind a few methods to scheduler.
52
+ We can gradually add more methods here if needed.
53
+ """
54
+ self.get_scheduler()
55
+
56
+ def set_module_grad(self, module_grad: dict) -> None:
57
+ """
58
+ Adjusts the state of each module in the object.
59
+
60
+ Parameters:
61
+ - module_grad (dict): A dictionary where each key is the name of a module (as an attribute of the object),
62
+ and each value is a bool indicating whether the module's parameters require gradients.
63
+
64
+ Functionality:
65
+ For each module name in the dictionary:
66
+ - Updates whether its parameters require gradients based on 'is_trainable'.
67
+ """
68
+ for k, is_trainable in module_grad.items():
69
+ getattr(self, k).requires_grad_(is_trainable)
70
+
71
+ @abstractmethod
72
+ def enable_gradient_checkpointing(self) -> None:
73
+ """
74
+ Activates gradient checkpointing for the current model (may be referred to as *activation checkpointing* or
75
+ *checkpoint activations* in other frameworks).
76
+ """
77
+ pass
78
+
79
+
80
+ class VAEInterface(ABC, torch.nn.Module):
81
+ @abstractmethod
82
+ def decode_to_pixel(self, latent: torch.Tensor) -> torch.Tensor:
83
+ """
84
+ A method to decode a latent representation to an image or video.
85
+ Input: a tensor with shape [B, F // T, C, H // S, W // S] where T and S are temporal and spatial compression factors.
86
+ Output: a tensor with shape [B, F, C, H, W] where the number of frames is 1 for images.
87
+ """
88
+ pass
89
+
90
+
91
+ class TextEncoderInterface(ABC, torch.nn.Module):
92
+ @abstractmethod
93
+ def forward(self, text_prompts: List[str]) -> dict:
94
+ """
95
+ A method to tokenize text prompts with a tokenizer and encode them into a latent representation.
96
+ Input: a list of strings.
97
+ Output: a dictionary containing the encoded representation of the text prompts.
98
+ """
99
+ pass
100
+
101
+
102
+ class InferencePipelineInterface(ABC):
103
+ @abstractmethod
104
+ def inference_with_trajectory(self, noise: torch.Tensor, conditional_dict: dict) -> torch.Tensor:
105
+ """
106
+ Run inference with the given diffusion / distilled generators.
107
+ Input:
108
+ - noise: a tensor sampled from N(0, 1) with shape [B, F, C, H, W] where the number of frames is 1 for images.
109
+ - conditional_dict: a dictionary containing the conditional information (e.g. text embeddings, image embeddings).
110
+ Output:
111
+ - output: a tensor with shape [B, T, F, C, H, W].
112
+ T is the total number of timesteps. output[0] is pure noise and output[i] for i > 0
113
+ represents the x0 prediction at each timestep.
114
+ """
exp_code/1_benchmark/CausVid/causvid/models/sdxl/__pycache__/sdxl_wrapper.cpython-312.pyc ADDED
Binary file (9.04 kB). View file
 
exp_code/1_benchmark/CausVid/causvid/models/sdxl/sdxl_wrapper.py ADDED
@@ -0,0 +1,200 @@
1
+ from causvid.models.model_interface import (
2
+ DiffusionModelInterface,
3
+ TextEncoderInterface,
4
+ VAEInterface
5
+ )
6
+ from diffusers import UNet2DConditionModel, AutoencoderKL, DDIMScheduler
7
+ from transformers import CLIPTextModel, CLIPTextModelWithProjection
8
+ from transformers import AutoTokenizer
9
+ from typing import List
10
+ import torch
11
+
12
+
13
+ class SDXLTextEncoder(TextEncoderInterface):
14
+ def __init__(self) -> None:
15
+ super().__init__()
16
+
17
+ self.text_encoder_one = CLIPTextModel.from_pretrained(
18
+ "stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder", revision=None
19
+ )
20
+
21
+ self.text_encoder_two = CLIPTextModelWithProjection.from_pretrained(
22
+ "stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder_2", revision=None
23
+ )
24
+
25
+ self.tokenizer_one = AutoTokenizer.from_pretrained(
26
+ "stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer", revision=None, use_fast=False
27
+ )
28
+
29
+ self.tokenizer_two = AutoTokenizer.from_pretrained(
30
+ "stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer_2", revision=None, use_fast=False
31
+ )
32
+
33
+ @property
34
+ def device(self):
35
+ return next(self.parameters()).device
36
+
37
+ def _model_forward(self, batch: dict) -> dict:
38
+ """
39
+ Processes two sets of input token IDs using two separate text encoders, and returns both
40
+ concatenated token-level embeddings and pooled sentence-level embeddings.
41
+
42
+ Args:
43
+ batch (dict):
44
+ A dictionary containing:
45
+ - text_input_ids_one (torch.Tensor): The token IDs for the first tokenizer,
46
+ of shape [batch_size, num_tokens].
47
+ - text_input_ids_two (torch.Tensor): The token IDs for the second tokenizer,
48
+ of shape [batch_size, num_tokens].
49
+
50
+ Returns:
51
+ dict: A dictionary with two keys:
52
+ - "prompt_embeds" (torch.Tensor): Concatenated embeddings from the second-to-last
53
+ hidden states of both text encoders, of shape [batch_size, num_tokens, hidden_dim * 2].
54
+ - "pooled_prompt_embeds" (torch.Tensor): Pooled embeddings (final layer output)
55
+ from the second text encoder, of shape [batch_size, hidden_dim].
56
+ """
57
+ text_input_ids_one = batch['text_input_ids_one']
58
+ text_input_ids_two = batch['text_input_ids_two']
59
+ prompt_embeds_list = []
60
+
61
+ for text_input_ids, text_encoder in zip([text_input_ids_one, text_input_ids_two], [self.text_encoder_one, self.text_encoder_two]):
62
+ prompt_embeds = text_encoder(
63
+ text_input_ids.to(self.device),
64
+ output_hidden_states=True,
65
+ )
66
+
67
+ # We are only interested in the pooled output of the final text encoder
68
+ pooled_prompt_embeds = prompt_embeds[0]
69
+
70
+ prompt_embeds = prompt_embeds.hidden_states[-2]
71
+ bs_embed, seq_len, _ = prompt_embeds.shape
72
+ prompt_embeds = prompt_embeds.view(bs_embed, seq_len, -1)
73
+ prompt_embeds_list.append(prompt_embeds)
74
+
75
+ prompt_embeds = torch.cat(prompt_embeds_list, dim=-1)
76
+ # use the second text encoder's pooled prompt embeds (overwrite in for loop)
77
+ pooled_prompt_embeds = pooled_prompt_embeds.view(
78
+ len(text_input_ids_one), -1)
79
+
80
+ output_dict = {
81
+ "prompt_embeds": prompt_embeds,
82
+ "pooled_prompt_embeds": pooled_prompt_embeds,
83
+ }
84
+ return output_dict
85
+
86
+ def _encode_prompt(self, prompt_list):
87
+ """
88
+ Encodes a list of prompts with two tokenizers and returns a dictionary
89
+ of the resulting tensors.
90
+ """
91
+ text_input_ids_one = self.tokenizer_one(
92
+ prompt_list,
93
+ padding="max_length",
94
+ max_length=self.tokenizer_one.model_max_length,
95
+ truncation=True,
96
+ return_tensors="pt"
97
+ ).input_ids
98
+
99
+ text_input_ids_two = self.tokenizer_two(
100
+ prompt_list,
101
+ padding="max_length",
102
+ max_length=self.tokenizer_two.model_max_length,
103
+ truncation=True,
104
+ return_tensors="pt"
105
+ ).input_ids
106
+
107
+ prompt_dict = {
108
+ 'text_input_ids_one': text_input_ids_one,
109
+ 'text_input_ids_two': text_input_ids_two
110
+ }
111
+ return prompt_dict
112
+
113
+ def forward(self, text_prompts: List[str]) -> dict:
114
+ tokenized_prompts = self._encode_prompt(text_prompts)
115
+ return self._model_forward(tokenized_prompts)
116
+
117
+
118
+ class SDXLVAE(VAEInterface):
119
+ def __init__(self):
120
+ super().__init__()
121
+
122
+ self.vae = AutoencoderKL.from_pretrained(
123
+ "stabilityai/stable-diffusion-xl-base-1.0",
124
+ subfolder="vae"
125
+ )
126
+
127
+ def decode_to_pixel(self, latent: torch.Tensor) -> torch.Tensor:
128
+ latent = latent.squeeze(1)
129
+ latent = latent / self.vae.config.scaling_factor
130
+ # ensure the output is float
131
+ image = self.vae.decode(latent).sample.float()
132
+ image = image.unsqueeze(1)
133
+ return image
134
+
135
+
136
+ class SDXLWrapper(DiffusionModelInterface):
137
+ def __init__(self):
138
+ super().__init__()
139
+
140
+ self.model = UNet2DConditionModel.from_pretrained(
141
+ "stabilityai/stable-diffusion-xl-base-1.0",
142
+ subfolder="unet"
143
+ )
144
+
145
+ self.add_time_ids = self._build_condition_input(resolution=1024)
146
+
147
+ self.scheduler = DDIMScheduler.from_pretrained(
148
+ "stabilityai/stable-diffusion-xl-base-1.0",
149
+ subfolder="scheduler"
150
+ )
151
+
152
+ super().post_init()
153
+
154
+ def enable_gradient_checkpointing(self) -> None:
155
+ self.model.enable_gradient_checkpointing()
156
+
157
+ def forward(
158
+ self, noisy_image_or_video: torch.Tensor, conditional_dict: dict,
159
+ timestep: torch.Tensor, kv_cache: List[dict] = None, current_start: int = None,
160
+ current_end: int = None
161
+ ) -> torch.Tensor:
162
+ # TODO: Check how to apply gradient checkpointing
163
+ # [B, 1, C, H, W] -> [B, C, H, W]
164
+ noisy_image_or_video = noisy_image_or_video.squeeze(1)
165
+
166
+ # [B, 1] -> [B]
167
+ timestep = timestep.squeeze(1)
168
+
169
+ added_conditions = {
170
+ "time_ids": self.add_time_ids.repeat(noisy_image_or_video.shape[0], 1).to(noisy_image_or_video.device),
171
+ "text_embeds": conditional_dict["pooled_prompt_embeds"]
172
+ }
173
+
174
+ pred_noise = self.model(
175
+ sample=noisy_image_or_video,
176
+ timestep=timestep,
177
+ encoder_hidden_states=conditional_dict['prompt_embeds'],
178
+ added_cond_kwargs=added_conditions
179
+ ).sample
180
+
181
+ pred_x0 = self.scheduler.convert_noise_to_x0(
182
+ noise=pred_noise,
183
+ xt=noisy_image_or_video,
184
+ timestep=timestep
185
+ )
186
+
187
+ # [B, C, H, W] -> [B, 1, C, H, W]
188
+ pred_x0 = pred_x0.unsqueeze(1)
189
+
190
+ return pred_x0
191
+
192
+ @staticmethod
193
+ def _build_condition_input(resolution):
194
+ original_size = (resolution, resolution)
195
+ target_size = (resolution, resolution)
196
+ crop_top_left = (0, 0)
197
+
198
+ add_time_ids = list(original_size + crop_top_left + target_size)
199
+ add_time_ids = torch.tensor([add_time_ids], dtype=torch.float32)
200
+ return add_time_ids
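The micro-conditioning tensor built by _build_condition_input is simply the original size, crop offset, and target size concatenated; since the method is a @staticmethod, the sketch below runs without instantiating the wrapper (assuming the module above is importable):

# from causvid.models.sdxl.sdxl_wrapper import SDXLWrapper
ids = SDXLWrapper._build_condition_input(resolution=1024)
print(ids)        # tensor([[1024., 1024., 0., 0., 1024., 1024.]])
print(ids.shape)  # torch.Size([1, 6])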
exp_code/1_benchmark/CausVid/causvid/models/wan/__init__.py ADDED
File without changes
exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (187 Bytes). View file
 
exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/causal_inference.cpython-312.pyc ADDED
Binary file (9.29 kB). View file
 
exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/causal_model.cpython-312.pyc ADDED
Binary file (35 kB). View file
 
exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/flow_match.cpython-312.pyc ADDED
Binary file (5.73 kB). View file
 
exp_code/1_benchmark/CausVid/causvid/models/wan/__pycache__/wan_wrapper.cpython-312.pyc ADDED
Binary file (12.3 kB). View file
 
exp_code/1_benchmark/CausVid/causvid/models/wan/bidirectional_inference.py ADDED
@@ -0,0 +1,69 @@
1
+ from causvid.models import (
2
+ get_diffusion_wrapper,
3
+ get_text_encoder_wrapper,
4
+ get_vae_wrapper
5
+ )
6
+ from typing import List
7
+ import torch
8
+
9
+
10
+ class BidirectionalInferencePipeline(torch.nn.Module):
11
+ def __init__(self, args, device):
12
+ super().__init__()
13
+ # Step 1: Initialize all models
14
+ self.generator_model_name = getattr(
15
+ args, "generator_name", args.model_name)
16
+ self.generator = get_diffusion_wrapper(
17
+ model_name=self.generator_model_name)()
18
+ self.text_encoder = get_text_encoder_wrapper(
19
+ model_name=args.model_name)()
20
+ self.vae = get_vae_wrapper(model_name=args.model_name)()
21
+
22
+ # Step 2: Initialize all bidirectional wan hyperparameters
23
+ self.denoising_step_list = torch.tensor(
24
+ args.denoising_step_list, dtype=torch.long, device=device)
25
+
26
+ self.scheduler = self.generator.get_scheduler()
27
+ if args.warp_denoising_step: # Warp the denoising step according to the scheduler time shift
28
+ timesteps = torch.cat((self.scheduler.timesteps.cpu(), torch.tensor([0], dtype=torch.float32))).cuda()
29
+ self.denoising_step_list = timesteps[1000 - self.denoising_step_list]
30
+
31
+ def inference(self, noise: torch.Tensor, text_prompts: List[str]) -> torch.Tensor:
32
+ """
33
+ Perform inference on the given noise and text prompts.
34
+ Inputs:
35
+ noise (torch.Tensor): The input noise tensor of shape
36
+ (batch_size, num_frames, num_channels, height, width).
37
+ text_prompts (List[str]): The list of text prompts.
38
+ Outputs:
39
+ video (torch.Tensor): The generated video tensor of shape
40
+ (batch_size, num_frames, num_channels, height, width). It is normalized to be in the range [0, 1].
41
+ """
42
+ conditional_dict = self.text_encoder(
43
+ text_prompts=text_prompts
44
+ )
45
+
46
+ # initial point
47
+ noisy_image_or_video = noise
48
+
49
+ for index, current_timestep in enumerate(self.denoising_step_list):
50
+ pred_image_or_video = self.generator(
51
+ noisy_image_or_video=noisy_image_or_video,
52
+ conditional_dict=conditional_dict,
53
+ timestep=torch.ones(
54
+ noise.shape[:2], dtype=torch.long, device=noise.device) * current_timestep
55
+ ) # [B, F, C, H, W]
56
+
57
+ if index < len(self.denoising_step_list) - 1:
58
+ next_timestep = self.denoising_step_list[index + 1] * torch.ones(
59
+ noise.shape[:2], dtype=torch.long, device=noise.device)
60
+
61
+ noisy_image_or_video = self.scheduler.add_noise(
62
+ pred_image_or_video.flatten(0, 1),
63
+ torch.randn_like(pred_image_or_video.flatten(0, 1)),
64
+ next_timestep.flatten(0, 1)
65
+ ).unflatten(0, noise.shape[:2])
66
+
67
+ video = self.vae.decode_to_pixel(pred_image_or_video)
68
+ video = (video * 0.5 + 0.5).clamp(0, 1)
69
+ return video
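The loop above alternates between predicting x0 at the current timestep and re-noising that prediction to the next timestep. The sketch below mirrors only that control flow, with a stand-in generator and a toy linear add_noise; it is not the Wan scheduler:

import torch

denoising_step_list = [999, 749, 499, 249]
B, F, C, H, W = 1, 3, 4, 8, 8
noisy = torch.randn(B, F, C, H, W)                    # starts as pure noise

def fake_generator(x, t):                             # stand-in for self.generator(...)
    return 0.5 * x                                    # pretend x0 prediction

def toy_add_noise(x0, noise, t, total=1000):          # NOT the real scheduler
    sigma = t / total
    return (1 - sigma) * x0 + sigma * noise

for index, t in enumerate(denoising_step_list):
    pred = fake_generator(noisy, t)
    if index < len(denoising_step_list) - 1:
        next_t = denoising_step_list[index + 1]
        noisy = toy_add_noise(pred, torch.randn_like(pred), next_t)
print(pred.shape)                                     # torch.Size([1, 3, 4, 8, 8])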
exp_code/1_benchmark/CausVid/causvid/models/wan/causal_inference.py ADDED
@@ -0,0 +1,204 @@
1
+ from causvid.models import (
2
+ get_diffusion_wrapper,
3
+ get_text_encoder_wrapper,
4
+ get_vae_wrapper
5
+ )
6
+ from typing import List, Optional
7
+ import torch
8
+
9
+
10
+ class InferencePipeline(torch.nn.Module):
11
+ def __init__(self, args, device):
12
+ super().__init__()
13
+ # Step 1: Initialize all models
14
+ self.generator_model_name = getattr(
15
+ args, "generator_name", args.model_name)
16
+ self.generator = get_diffusion_wrapper(
17
+ model_name=self.generator_model_name)()
18
+ self.text_encoder = get_text_encoder_wrapper(
19
+ model_name=args.model_name)()
20
+ self.vae = get_vae_wrapper(model_name=args.model_name)()
21
+
22
+ # Step 2: Initialize all causal hyperparmeters
23
+ self.denoising_step_list = torch.tensor(
24
+ args.denoising_step_list, dtype=torch.long, device=device)
25
+ assert self.denoising_step_list[-1] == 0
26
+ # remove the last timestep (which equals zero)
27
+ self.denoising_step_list = self.denoising_step_list[:-1]
28
+
29
+ self.scheduler = self.generator.get_scheduler()
30
+ if args.warp_denoising_step: # Warp the denoising step according to the scheduler time shift
31
+ timesteps = torch.cat((self.scheduler.timesteps.cpu(), torch.tensor([0], dtype=torch.float32))).cuda()
32
+ self.denoising_step_list = timesteps[1000 - self.denoising_step_list]
33
+
34
+ self.num_transformer_blocks = 30
35
+ self.frame_seq_length = 1560
36
+
37
+ self.kv_cache1 = None
38
+ self.kv_cache2 = None
39
+ self.args = args
40
+ self.num_frame_per_block = getattr(
41
+ args, "num_frame_per_block", 1)
42
+
43
+ print(f"KV inference with {self.num_frame_per_block} frames per block")
44
+
45
+ if self.num_frame_per_block > 1:
46
+ self.generator.model.num_frame_per_block = self.num_frame_per_block
47
+
48
+ def _initialize_kv_cache(self, batch_size, dtype, device):
49
+ """
50
+ Initialize a Per-GPU KV cache for the Wan model.
51
+ """
52
+ kv_cache1 = []
53
+
54
+ for _ in range(self.num_transformer_blocks):
55
+ kv_cache1.append({
56
+ "k": torch.zeros([batch_size, 32760, 12, 128], dtype=dtype, device=device),
57
+ "v": torch.zeros([batch_size, 32760, 12, 128], dtype=dtype, device=device)
58
+ })
59
+
60
+ self.kv_cache1 = kv_cache1 # always store the clean cache
61
+
62
+ def _initialize_crossattn_cache(self, batch_size, dtype, device):
63
+ """
64
+ Initialize a Per-GPU cross-attention cache for the Wan model.
65
+ """
66
+ crossattn_cache = []
67
+
68
+ for _ in range(self.num_transformer_blocks):
69
+ crossattn_cache.append({
70
+ "k": torch.zeros([batch_size, 512, 12, 128], dtype=dtype, device=device),
71
+ "v": torch.zeros([batch_size, 512, 12, 128], dtype=dtype, device=device),
72
+ "is_init": False
73
+ })
74
+
75
+ self.crossattn_cache = crossattn_cache # always store the clean cache
76
+
77
+ def inference(self, noise: torch.Tensor, text_prompts: List[str], start_latents: Optional[torch.Tensor] = None, return_latents: bool = False) -> torch.Tensor:
78
+ """
79
+ Perform inference on the given noise and text prompts.
80
+ Inputs:
81
+ noise (torch.Tensor): The input noise tensor of shape
82
+ (batch_size, num_frames, num_channels, height, width).
83
+ text_prompts (List[str]): The list of text prompts.
84
+ Outputs:
85
+ video (torch.Tensor): The generated video tensor of shape
86
+ (batch_size, num_frames, num_channels, height, width). It is normalized to be in the range [0, 1].
87
+ """
88
+ batch_size, num_frames, num_channels, height, width = noise.shape
89
+ conditional_dict = self.text_encoder(
90
+ text_prompts=text_prompts
91
+ )
92
+
93
+ output = torch.zeros(
94
+ [batch_size, num_frames, num_channels, height, width],
95
+ device=noise.device,
96
+ dtype=noise.dtype
97
+ )
98
+
99
+ # Step 1: Initialize KV cache
100
+ if self.kv_cache1 is None:
101
+ self._initialize_kv_cache(
102
+ batch_size=batch_size,
103
+ dtype=noise.dtype,
104
+ device=noise.device
105
+ )
106
+
107
+ self._initialize_crossattn_cache(
108
+ batch_size=batch_size,
109
+ dtype=noise.dtype,
110
+ device=noise.device
111
+ )
112
+ else:
113
+ # reset cross attn cache
114
+ for block_index in range(self.num_transformer_blocks):
115
+ self.crossattn_cache[block_index]["is_init"] = False
116
+
117
+ num_input_blocks = start_latents.shape[1] // self.num_frame_per_block if start_latents is not None else 0
118
+
119
+ # Step 2: Temporal denoising loop
120
+ num_blocks = num_frames // self.num_frame_per_block
121
+ for block_index in range(num_blocks):
122
+ noisy_input = noise[:, block_index * self.num_frame_per_block:(block_index + 1) * self.num_frame_per_block]
123
+
124
+ if start_latents is not None and block_index < num_input_blocks:
125
+ timestep = torch.ones(
126
+ [batch_size, self.num_frame_per_block], device=noise.device, dtype=torch.int64) * 0
127
+
128
+ current_ref_latents = start_latents[:, block_index * self.num_frame_per_block:(
129
+ block_index + 1) * self.num_frame_per_block]
130
+ output[:, block_index * self.num_frame_per_block:(
131
+ block_index + 1) * self.num_frame_per_block] = current_ref_latents
132
+
133
+ self.generator(
134
+ noisy_image_or_video=current_ref_latents,
135
+ conditional_dict=conditional_dict,
136
+ timestep=timestep * 0,
137
+ kv_cache=self.kv_cache1,
138
+ crossattn_cache=self.crossattn_cache,
139
+ current_start=block_index * self.num_frame_per_block * self.frame_seq_length,
140
+ current_end=(block_index + 1) *
141
+ self.num_frame_per_block * self.frame_seq_length
142
+ )
143
+ continue
144
+
145
+ # Step 2.1: Spatial denoising loop
146
+ for index, current_timestep in enumerate(self.denoising_step_list):
147
+ # set current timestep
148
+ timestep = torch.ones([batch_size, self.num_frame_per_block], device=noise.device, dtype=torch.int64) * current_timestep
149
+
150
+ if index < len(self.denoising_step_list) - 1:
151
+ denoised_pred = self.generator(
152
+ noisy_image_or_video=noisy_input,
153
+ conditional_dict=conditional_dict,
154
+ timestep=timestep,
155
+ kv_cache=self.kv_cache1,
156
+ crossattn_cache=self.crossattn_cache,
157
+ current_start=block_index * self.num_frame_per_block * self.frame_seq_length,
158
+ current_end=(
159
+ block_index + 1) * self.num_frame_per_block * self.frame_seq_length
160
+ )
161
+ next_timestep = self.denoising_step_list[index + 1]
162
+ noisy_input = self.scheduler.add_noise(
163
+ denoised_pred.flatten(0, 1),
164
+ torch.randn_like(denoised_pred.flatten(0, 1)),
165
+ next_timestep *
166
+ torch.ones([batch_size], device="cuda",
167
+ dtype=torch.long)
168
+ ).unflatten(0, denoised_pred.shape[:2])
169
+ else:
170
+ # for getting real output
171
+ denoised_pred = self.generator(
172
+ noisy_image_or_video=noisy_input,
173
+ conditional_dict=conditional_dict,
174
+ timestep=timestep,
175
+ kv_cache=self.kv_cache1,
176
+ crossattn_cache=self.crossattn_cache,
177
+ current_start=block_index * self.num_frame_per_block * self.frame_seq_length,
178
+ current_end=(
179
+ block_index + 1) * self.num_frame_per_block * self.frame_seq_length
180
+ )
181
+
182
+ # Step 2.2: rerun with timestep zero to update the cache
183
+ output[:, block_index * self.num_frame_per_block:(
184
+ block_index + 1) * self.num_frame_per_block] = denoised_pred
185
+
186
+ self.generator(
187
+ noisy_image_or_video=denoised_pred,
188
+ conditional_dict=conditional_dict,
189
+ timestep=timestep * 0,
190
+ kv_cache=self.kv_cache1,
191
+ crossattn_cache=self.crossattn_cache,
192
+ current_start=block_index * self.num_frame_per_block * self.frame_seq_length,
193
+ current_end=(block_index + 1) *
194
+ self.num_frame_per_block * self.frame_seq_length
195
+ )
196
+
197
+ # Step 3: Decode the output
198
+ video = self.vae.decode_to_pixel(output)
199
+ video = (video * 0.5 + 0.5).clamp(0, 1)
200
+
201
+ if return_latents:
202
+ return video, output
203
+ else:
204
+ return video
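The causal pipeline writes each block's keys and values into a fixed-size cache at token offsets derived from the block index; with frame_seq_length = 1560 (as set in __init__) the per-block ranges look like this:

frame_seq_length = 1560        # tokens per latent frame, as in InferencePipeline
num_frame_per_block = 3
num_frames = 21
for block_index in range(num_frames // num_frame_per_block):
    current_start = block_index * num_frame_per_block * frame_seq_length
    current_end = (block_index + 1) * num_frame_per_block * frame_seq_length
    print(block_index, (current_start, current_end))
# block 0 -> (0, 4680), block 1 -> (4680, 9360), ..., block 6 -> (28080, 32760)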
exp_code/1_benchmark/CausVid/causvid/models/wan/causal_model.py ADDED
@@ -0,0 +1,749 @@
1
+ from causvid.models.wan.wan_base.modules.attention import attention
2
+ from causvid.models.wan.wan_base.modules.model import (
3
+ WanRMSNorm,
4
+ rope_apply,
5
+ WanLayerNorm,
6
+ WAN_CROSSATTENTION_CLASSES,
7
+ Head,
8
+ rope_params,
9
+ MLPProj,
10
+ sinusoidal_embedding_1d
11
+ )
12
+ from torch.nn.attention.flex_attention import create_block_mask, flex_attention
13
+ from diffusers.configuration_utils import ConfigMixin, register_to_config
14
+ from torch.nn.attention.flex_attention import BlockMask
15
+ from diffusers.models.modeling_utils import ModelMixin
16
+ import torch.nn as nn
17
+ import torch
18
+ import math
19
+
20
+ # the wan 1.3B model has a weird channel / head configuration and requires max-autotune to work with flexattention
21
+ # see https://github.com/pytorch/pytorch/issues/133254
22
+ # change to default for other models
23
+ flex_attention = torch.compile(
24
+ flex_attention, dynamic=False, mode="max-autotune")
25
+
26
+
27
+ def causal_rope_apply(x, grid_sizes, freqs, start_frame=0):
28
+ n, c = x.size(2), x.size(3) // 2
29
+
30
+ # split freqs
31
+ freqs = freqs.split([c - 2 * (c // 3), c // 3, c // 3], dim=1)
32
+
33
+ # loop over samples
34
+ output = []
35
+
36
+ for i, (f, h, w) in enumerate(grid_sizes.tolist()):
37
+ seq_len = f * h * w
38
+
39
+ # precompute multipliers
40
+ x_i = torch.view_as_complex(x[i, :seq_len].to(torch.float64).reshape(
41
+ seq_len, n, -1, 2))
42
+ freqs_i = torch.cat([
43
+ freqs[0][start_frame:start_frame + f].view(f, 1, 1, -1).expand(f, h, w, -1),
44
+ freqs[1][:h].view(1, h, 1, -1).expand(f, h, w, -1),
45
+ freqs[2][:w].view(1, 1, w, -1).expand(f, h, w, -1)
46
+ ],
47
+ dim=-1).reshape(seq_len, 1, -1)
48
+
49
+ # apply rotary embedding
50
+ x_i = torch.view_as_real(x_i * freqs_i).flatten(2)
51
+ x_i = torch.cat([x_i, x[i, seq_len:]])
52
+
53
+ # append to collection
54
+ output.append(x_i)
55
+ return torch.stack(output).type_as(x)
56
+
57
+
58
+ class CausalWanSelfAttention(nn.Module):
59
+
60
+ def __init__(self,
61
+ dim,
62
+ num_heads,
63
+ window_size=(-1, -1),
64
+ qk_norm=True,
65
+ eps=1e-6):
66
+ assert dim % num_heads == 0
67
+ super().__init__()
68
+ self.dim = dim
69
+ self.num_heads = num_heads
70
+ self.head_dim = dim // num_heads
71
+ self.window_size = window_size
72
+ self.qk_norm = qk_norm
73
+ self.eps = eps
74
+
75
+ # layers
76
+ self.q = nn.Linear(dim, dim)
77
+ self.k = nn.Linear(dim, dim)
78
+ self.v = nn.Linear(dim, dim)
79
+ self.o = nn.Linear(dim, dim)
80
+ self.norm_q = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()
81
+ self.norm_k = WanRMSNorm(dim, eps=eps) if qk_norm else nn.Identity()
82
+
83
+ def forward(self, x, seq_lens, grid_sizes, freqs, block_mask, kv_cache=None, current_start=0, current_end=0):
84
+ r"""
85
+ Args:
86
+ x(Tensor): Shape [B, L, num_heads, C / num_heads]
87
+ seq_lens(Tensor): Shape [B]
88
+ grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
89
+ freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
90
+ block_mask (BlockMask)
91
+ """
92
+ b, s, n, d = *x.shape[:2], self.num_heads, self.head_dim
93
+
94
+ # query, key, value function
95
+ def qkv_fn(x):
96
+ q = self.norm_q(self.q(x)).view(b, s, n, d)
97
+ k = self.norm_k(self.k(x)).view(b, s, n, d)
98
+ v = self.v(x).view(b, s, n, d)
99
+ return q, k, v
100
+
101
+ q, k, v = qkv_fn(x)
102
+
103
+ if kv_cache is None:
104
+ roped_query = rope_apply(q, grid_sizes, freqs).type_as(v)
105
+ roped_key = rope_apply(k, grid_sizes, freqs).type_as(v)
106
+
107
+ padded_length = math.ceil(q.shape[1] / 128) * 128 - q.shape[1]
108
+ padded_roped_query = torch.cat(
109
+ [roped_query,
110
+ torch.zeros([q.shape[0], padded_length, q.shape[2], q.shape[3]],
111
+ device=q.device, dtype=v.dtype)],
112
+ dim=1
113
+ )
114
+
115
+ padded_roped_key = torch.cat(
116
+ [roped_key, torch.zeros([k.shape[0], padded_length, k.shape[2], k.shape[3]],
117
+ device=k.device, dtype=v.dtype)],
118
+ dim=1
119
+ )
120
+
121
+ padded_v = torch.cat(
122
+ [v, torch.zeros([v.shape[0], padded_length, v.shape[2], v.shape[3]],
123
+ device=v.device, dtype=v.dtype)],
124
+ dim=1
125
+ )
126
+
127
+ # print(q.shape, k.shape, v.shape, padded_roped_query.shape, padded_roped_key.shape, padded_v.shape)
128
+ x = flex_attention(
129
+ query=padded_roped_query.transpose(2, 1),
130
+ key=padded_roped_key.transpose(2, 1),
131
+ value=padded_v.transpose(2, 1),
132
+ block_mask=block_mask
133
+ )[:, :, :-padded_length].transpose(2, 1)
134
+ else:
135
+ roped_query = causal_rope_apply(
136
+ q, grid_sizes, freqs, start_frame=current_start // math.prod(grid_sizes[0][1:]).item()).type_as(v)
137
+ roped_key = causal_rope_apply(
138
+ k, grid_sizes, freqs, start_frame=current_start // math.prod(grid_sizes[0][1:]).item()).type_as(v)
139
+
140
+ kv_cache["k"][:, current_start:current_end] = roped_key
141
+ kv_cache["v"][:, current_start:current_end] = v
142
+
143
+ x = attention(roped_query, kv_cache["k"][:, :current_end], kv_cache["v"][:, :current_end])
144
+
145
+ # print(x.shape, q.shape, k.shape, v.shape, roped_query.shape, roped_key.shape, kv_cache["k"][:, :current_end].shape, kv_cache["v"][:, :current_end].shape)
146
+
147
+ # output
148
+ x = x.flatten(2)
149
+ x = self.o(x)
150
+ return x
151
+
152
+
153
+ class CausalWanAttentionBlock(nn.Module):
154
+
155
+ def __init__(self,
156
+ cross_attn_type,
157
+ dim,
158
+ ffn_dim,
159
+ num_heads,
160
+ window_size=(-1, -1),
161
+ qk_norm=True,
162
+ cross_attn_norm=False,
163
+ eps=1e-6):
164
+ super().__init__()
165
+ self.dim = dim
166
+ self.ffn_dim = ffn_dim
167
+ self.num_heads = num_heads
168
+ self.window_size = window_size
169
+ self.qk_norm = qk_norm
170
+ self.cross_attn_norm = cross_attn_norm
171
+ self.eps = eps
172
+
173
+ # layers
174
+ self.norm1 = WanLayerNorm(dim, eps)
175
+ self.self_attn = CausalWanSelfAttention(dim, num_heads, window_size, qk_norm,
176
+ eps)
177
+ self.norm3 = WanLayerNorm(
178
+ dim, eps,
179
+ elementwise_affine=True) if cross_attn_norm else nn.Identity()
180
+ self.cross_attn = WAN_CROSSATTENTION_CLASSES[cross_attn_type](dim,
181
+ num_heads,
182
+ (-1, -1),
183
+ qk_norm,
184
+ eps)
185
+ self.norm2 = WanLayerNorm(dim, eps)
186
+ self.ffn = nn.Sequential(
187
+ nn.Linear(dim, ffn_dim), nn.GELU(approximate='tanh'),
188
+ nn.Linear(ffn_dim, dim))
189
+
190
+ # modulation
191
+ self.modulation = nn.Parameter(torch.randn(1, 6, dim) / dim**0.5)
192
+
193
+ def forward(
194
+ self,
195
+ x,
196
+ e,
197
+ seq_lens,
198
+ grid_sizes,
199
+ freqs,
200
+ context,
201
+ context_lens,
202
+ block_mask,
203
+ kv_cache=None,
204
+ crossattn_cache=None,
205
+ current_start=0,
206
+ current_end=0
207
+ ):
208
+ r"""
209
+ Args:
210
+ x(Tensor): Shape [B, L, C] # torch.Size([1, 32760, 1536])
211
+ e(Tensor): Shape [B, F, 6, C] # torch.Size([1, 21, 6, 1536])
212
+ seq_lens(Tensor): Shape [B], length of each sequence in batch
213
+ grid_sizes(Tensor): Shape [B, 3], the second dimension contains (F, H, W)
214
+ freqs(Tensor): Rope freqs, shape [1024, C / num_heads / 2]
215
+ """
216
+ num_frames, frame_seqlen = e.shape[1], x.shape[1] // e.shape[1] # 21, 32760 / 21 = 1560
217
+ # assert e.dtype == torch.float32
218
+ # with amp.autocast(dtype=torch.float32):
219
+ e = (self.modulation.unsqueeze(1) + e).chunk(6, dim=2) # [torch.Size([1, 21, 1, 1536]) x 6]
220
+ # assert e[0].dtype == torch.float32
221
+
222
+ # self-attention
223
+ y = self.self_attn(
224
+ (self.norm1(x).unflatten(dim=1, sizes=(num_frames, frame_seqlen)) # torch.Size([1, 32760, 1536]) -> torch.Size([1, 21, 1560, 1536]) -> torch.Size([1, 32760, 1536])
225
+ * (1 + e[1]) + e[0]).flatten(1, 2),
226
+ seq_lens, # tensor([32760])
227
+ grid_sizes, # tensor([[21, 30, 52]])
228
+ freqs, # torch.Size([1024, 64])
229
+ block_mask, # (1, 1, 32768, 32768)
230
+ kv_cache,
231
+ current_start,
232
+ current_end
233
+ )
234
+
235
+ # with amp.autocast(dtype=torch.float32):
236
+ x = x + (y.unflatten(dim=1, sizes=(num_frames, frame_seqlen))* e[2]).flatten(1, 2) # torch.Size([1, 32760, 1536]) -> torch.Size([1, 21, 1560, 1536]) -> torch.Size([1, 32760, 1536])
237
+
238
+ # cross-attention & ffn function
239
+ def cross_attn_ffn(x, context, context_lens, e, crossattn_cache=None):
240
+ x = x + self.cross_attn(self.norm3(x), context,
241
+ context_lens, crossattn_cache=crossattn_cache)
242
+ y = self.ffn(
243
+ (self.norm2(x).unflatten(dim=1, sizes=(num_frames,
244
+ frame_seqlen)) * (1 + e[4]) + e[3]).flatten(1, 2) # torch.Size([1, 32760, 1536]) -> torch.Size([1, 21, 1560, 1536]) -> torch.Size([1, 32760, 1536])
245
+ )
246
+ # with amp.autocast(dtype=torch.float32):
247
+ x = x + (y.unflatten(dim=1, sizes=(num_frames,
248
+ frame_seqlen)) * e[5]).flatten(1, 2) # torch.Size([1, 32760, 1536]) -> torch.Size([1, 21, 1560, 1536]) -> torch.Size([1, 32760, 1536])
249
+ return x
250
+
251
+ x = cross_attn_ffn(x, context, context_lens, e, crossattn_cache)
252
+ return x
253
+
254
+
255
+ class CausalHead(nn.Module):
256
+
257
+ def __init__(self, dim, out_dim, patch_size, eps=1e-6):
258
+ super().__init__()
259
+ self.dim = dim
260
+ self.out_dim = out_dim
261
+ self.patch_size = patch_size
262
+ self.eps = eps
263
+
264
+ # layers
265
+ out_dim = math.prod(patch_size) * out_dim
266
+ self.norm = WanLayerNorm(dim, eps)
267
+ self.head = nn.Linear(dim, out_dim)
268
+
269
+ # modulation
270
+ self.modulation = nn.Parameter(torch.randn(1, 2, dim) / dim**0.5)
271
+
272
+ def forward(self, x, e):
273
+ r"""
274
+ Args:
275
+ x(Tensor): Shape [B, L1, C]
276
+ e(Tensor): Shape [B, F, 1, C]
277
+ """
278
+ # assert e.dtype == torch.float32
279
+ # with amp.autocast(dtype=torch.float32):
280
+ num_frames, frame_seqlen = e.shape[1], x.shape[1] // e.shape[1]
281
+ e = (self.modulation.unsqueeze(1) + e).chunk(2, dim=2)
282
+ x = (self.head(
283
+ self.norm(x).unflatten(dim=1, sizes=(num_frames, frame_seqlen)) *
284
+ (1 + e[1]) + e[0]))
285
+ return x
286
+
287
+
288
+ class CausalWanModel(ModelMixin, ConfigMixin):
289
+ r"""
290
+ Wan diffusion backbone supporting both text-to-video and image-to-video.
291
+ """
292
+
293
+ ignore_for_config = [
294
+ 'patch_size', 'cross_attn_norm', 'qk_norm', 'text_dim', 'window_size'
295
+ ]
296
+ _no_split_modules = ['WanAttentionBlock']
297
+ _supports_gradient_checkpointing = True
298
+
299
+ @register_to_config
300
+ def __init__(self,
301
+ model_type='t2v',
302
+ patch_size=(1, 2, 2),
303
+ text_len=512,
304
+ in_dim=16,
305
+ dim=2048,
306
+ ffn_dim=8192,
307
+ freq_dim=256,
308
+ text_dim=4096,
309
+ out_dim=16,
310
+ num_heads=16,
311
+ num_layers=32,
312
+ window_size=(-1, -1),
313
+ qk_norm=True,
314
+ cross_attn_norm=True,
315
+ eps=1e-6):
316
+ r"""
317
+ Initialize the diffusion model backbone.
318
+
319
+ Args:
320
+ model_type (`str`, *optional*, defaults to 't2v'):
321
+ Model variant - 't2v' (text-to-video) or 'i2v' (image-to-video)
322
+ patch_size (`tuple`, *optional*, defaults to (1, 2, 2)):
323
+ 3D patch dimensions for video embedding (t_patch, h_patch, w_patch)
324
+ text_len (`int`, *optional*, defaults to 512):
325
+ Fixed length for text embeddings
326
+ in_dim (`int`, *optional*, defaults to 16):
327
+ Input video channels (C_in)
328
+ dim (`int`, *optional*, defaults to 2048):
329
+ Hidden dimension of the transformer
330
+ ffn_dim (`int`, *optional*, defaults to 8192):
331
+ Intermediate dimension in feed-forward network
332
+ freq_dim (`int`, *optional*, defaults to 256):
333
+ Dimension for sinusoidal time embeddings
334
+ text_dim (`int`, *optional*, defaults to 4096):
335
+ Input dimension for text embeddings
336
+ out_dim (`int`, *optional*, defaults to 16):
337
+ Output video channels (C_out)
338
+ num_heads (`int`, *optional*, defaults to 16):
339
+ Number of attention heads
340
+ num_layers (`int`, *optional*, defaults to 32):
341
+ Number of transformer blocks
342
+ window_size (`tuple`, *optional*, defaults to (-1, -1)):
343
+ Window size for local attention (-1 indicates global attention)
344
+ qk_norm (`bool`, *optional*, defaults to True):
345
+ Enable query/key normalization
346
+ cross_attn_norm (`bool`, *optional*, defaults to False):
347
+ Enable cross-attention normalization
348
+ eps (`float`, *optional*, defaults to 1e-6):
349
+ Epsilon value for normalization layers
350
+ """
351
+
352
+ super().__init__()
353
+
354
+ assert model_type in ['t2v', 'i2v']
355
+ self.model_type = model_type
356
+
357
+ self.patch_size = patch_size
358
+ self.text_len = text_len
359
+ self.in_dim = in_dim
360
+ self.dim = dim
361
+ self.ffn_dim = ffn_dim
362
+ self.freq_dim = freq_dim
363
+ self.text_dim = text_dim
364
+ self.out_dim = out_dim
365
+ self.num_heads = num_heads
366
+ self.num_layers = num_layers
367
+ self.window_size = window_size
368
+ self.qk_norm = qk_norm
369
+ self.cross_attn_norm = cross_attn_norm
370
+ self.eps = eps
371
+
372
+ # embeddings
373
+ self.patch_embedding = nn.Conv3d(
374
+ in_dim, dim, kernel_size=patch_size, stride=patch_size)
375
+ self.text_embedding = nn.Sequential(
376
+ nn.Linear(text_dim, dim), nn.GELU(approximate='tanh'),
377
+ nn.Linear(dim, dim))
378
+
379
+ self.time_embedding = nn.Sequential(
380
+ nn.Linear(freq_dim, dim), nn.SiLU(), nn.Linear(dim, dim))
381
+ self.time_projection = nn.Sequential(
382
+ nn.SiLU(), nn.Linear(dim, dim * 6))
383
+
384
+ # blocks
385
+ cross_attn_type = 't2v_cross_attn' if model_type == 't2v' else 'i2v_cross_attn'
386
+ self.blocks = nn.ModuleList([
387
+ CausalWanAttentionBlock(cross_attn_type, dim, ffn_dim, num_heads,
388
+ window_size, qk_norm, cross_attn_norm, eps)
389
+ for _ in range(num_layers)
390
+ ])
391
+
392
+ # head
393
+ self.head = CausalHead(dim, out_dim, patch_size, eps)
394
+
395
+ # buffers (don't use register_buffer otherwise dtype will be changed in to())
396
+ assert (dim % num_heads) == 0 and (dim // num_heads) % 2 == 0
397
+ d = dim // num_heads
398
+ self.freqs = torch.cat([
399
+ rope_params(1024, d - 4 * (d // 6)),
400
+ rope_params(1024, 2 * (d // 6)),
401
+ rope_params(1024, 2 * (d // 6))
402
+ ],
403
+ dim=1)
404
+
405
+ if model_type == 'i2v':
406
+ self.img_emb = MLPProj(1280, dim)
407
+
408
+ # initialize weights
409
+ self.init_weights()
410
+
411
+ self.gradient_checkpointing = False
412
+
413
+ self.block_mask = None
414
+
415
+ self.num_frame_per_block = 1
416
+
417
+ def _set_gradient_checkpointing(self, module, value=False):
418
+ self.gradient_checkpointing = value
419
+
420
+ @staticmethod
421
+ def _prepare_blockwise_causal_attn_mask(
422
+ device: torch.device | str, num_frames: int = 21,
423
+ frame_seqlen: int = 1560, num_frame_per_block=1
424
+ ) -> BlockMask:
425
+ """
426
+ We divide the token sequence into the following format:
427
+ [1 latent frame] [1 latent frame] ... [1 latent frame]
428
+ We use flexattention to construct the attention mask
429
+ """
430
+ total_length = num_frames * frame_seqlen
431
+
432
+ # we do right padding to get to a multiple of 128
433
+ padded_length = math.ceil(total_length / 128) * 128 - total_length
434
+
435
+ ends = torch.zeros(total_length + padded_length,
436
+ device=device, dtype=torch.long)
437
+
438
+ # Block-wise causal mask will attend to all elements that are before the end of the current chunk
439
+ frame_indices = torch.arange(
440
+ start=0,
441
+ end=total_length,
442
+ step=frame_seqlen * num_frame_per_block,
443
+ device=device
444
+ )
445
+
446
+ for tmp in frame_indices:
447
+ ends[tmp:tmp + frame_seqlen * num_frame_per_block] = tmp + \
448
+ frame_seqlen * num_frame_per_block
449
+
450
+ def attention_mask(b, h, q_idx, kv_idx):
451
+ return (kv_idx < ends[q_idx]) | (q_idx == kv_idx)
452
+ # return ((kv_idx < total_length) & (q_idx < total_length)) | (q_idx == kv_idx) # bidirectional mask
453
+
454
+ block_mask = create_block_mask(attention_mask, B=None, H=None, Q_LEN=total_length + padded_length,
455
+ KV_LEN=total_length + padded_length, _compile=False, device=device)
456
+
457
+ import torch.distributed as dist
458
+ if not dist.is_initialized() or dist.get_rank() == 0:
459
+ print(
460
+ f" cache a block wise causal mask with block size of {num_frame_per_block} frames")
461
+ print(block_mask)
462
+
463
+ return block_mask
464
+
465
+ def _forward_inference(
466
+ self,
467
+ x,
468
+ t,
469
+ context,
470
+ seq_len,
471
+ clip_fea=None,
472
+ y=None,
473
+ kv_cache: dict = None,
474
+ crossattn_cache: dict = None,
475
+ current_start: int = 0,
476
+ current_end: int = 0
477
+ ):
478
+ r"""
479
+ Run the diffusion model with kv caching.
480
+ See Algorithm 2 of CausVid paper https://arxiv.org/abs/2412.07772 for details.
481
+ This function will be run num_frame times.
482
+ Process the latent frames one by one (1560 tokens each)
483
+
484
+ Args:
485
+ x (List[Tensor]):
486
+ List of input video tensors, each with shape [C_in, F, H, W]
487
+ t (Tensor):
488
+ Diffusion timesteps tensor of shape [B]
489
+ context (List[Tensor]):
490
+ List of text embeddings each with shape [L, C]
491
+ seq_len (`int`):
492
+ Maximum sequence length for positional encoding
493
+ clip_fea (Tensor, *optional*):
494
+ CLIP image features for image-to-video mode
495
+ y (List[Tensor], *optional*):
496
+ Conditional video inputs for image-to-video mode, same shape as x
497
+
498
+ Returns:
499
+ List[Tensor]:
500
+ List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
501
+ """
502
+ if self.model_type == 'i2v':
503
+ assert clip_fea is not None and y is not None
504
+ # params
505
+ device = self.patch_embedding.weight.device
506
+ if self.freqs.device != device:
507
+ self.freqs = self.freqs.to(device)
508
+
509
+ if y is not None:
510
+ x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]
511
+
512
+ # embeddings
513
+ x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
514
+ grid_sizes = torch.stack(
515
+ [torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
516
+ x = [u.flatten(2).transpose(1, 2) for u in x]
517
+ seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
518
+ assert seq_lens.max() <= seq_len
519
+ x = torch.cat(x)
520
+ """
521
+ torch.cat([
522
+ torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))],
523
+ dim=1) for u in x
524
+ ])
525
+ """
526
+
527
+ # time embeddings
528
+ # with amp.autocast(dtype=torch.float32):
529
+ e = self.time_embedding(sinusoidal_embedding_1d(self.freq_dim, t.flatten()).type_as(x))
530
+ e0 = self.time_projection(e).unflatten(1, (6, self.dim)).unflatten(dim=0, sizes=t.shape)
531
+ # assert e.dtype == torch.float32 and e0.dtype == torch.float32
532
+
533
+ # context
534
+ context_lens = None
535
+ context = self.text_embedding(
536
+ torch.stack([
537
+ torch.cat(
538
+ [u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
539
+ for u in context
540
+ ]))
541
+
542
+ if clip_fea is not None:
543
+ context_clip = self.img_emb(clip_fea) # bs x 257 x dim
544
+ context = torch.concat([context_clip, context], dim=1)
545
+
546
+ # arguments
547
+ kwargs = dict(
548
+ e=e0,
549
+ seq_lens=seq_lens,
550
+ grid_sizes=grid_sizes,
551
+ freqs=self.freqs,
552
+ context=context,
553
+ context_lens=context_lens,
554
+ block_mask=self.block_mask
555
+ )
556
+
557
+ def create_custom_forward(module):
558
+ def custom_forward(*inputs, **kwargs):
559
+ return module(*inputs, **kwargs)
560
+ return custom_forward
561
+
562
+ for block_index, block in enumerate(self.blocks):
563
+ if torch.is_grad_enabled() and self.gradient_checkpointing:
564
+ assert False
565
+ else:
566
+ kwargs.update(
567
+ {
568
+ "kv_cache": kv_cache[block_index],
569
+ "crossattn_cache": crossattn_cache[block_index],
570
+ "current_start": current_start,
571
+ "current_end": current_end
572
+ }
573
+ )
574
+ x = block(x, **kwargs)
575
+
576
+ # head
577
+ x = self.head(x, e.unflatten(dim=0, sizes=t.shape).unsqueeze(2))
578
+
579
+ # unpatchify
580
+ x = self.unpatchify(x, grid_sizes)
581
+ return torch.stack(x)
582
+
583
+    def _forward_train(
+        self,
+        x,
+        t,
+        context,
+        seq_len,
+        clip_fea=None,
+        y=None,
+    ):
+        r"""
+        Forward pass through the diffusion model
+
+        Args:
+            x (List[Tensor]):
+                List of input video tensors, each with shape [C_in, F, H, W]
+            t (Tensor):
+                Diffusion timesteps tensor of shape [B]
+            context (List[Tensor]):
+                List of text embeddings each with shape [L, C]
+            seq_len (`int`):
+                Maximum sequence length for positional encoding
+            clip_fea (Tensor, *optional*):
+                CLIP image features for image-to-video mode
+            y (List[Tensor], *optional*):
+                Conditional video inputs for image-to-video mode, same shape as x
+
+        Returns:
+            List[Tensor]:
+                List of denoised video tensors with original input shapes [C_out, F, H / 8, W / 8]
+        """
+        if self.model_type == 'i2v':
+            assert clip_fea is not None and y is not None
+        # params
+        device = self.patch_embedding.weight.device
+        if self.freqs.device != device:
+            self.freqs = self.freqs.to(device)
+
+        # Construct blockwise causal attn mask
+        if self.block_mask is None:
+            self.block_mask = self._prepare_blockwise_causal_attn_mask(
+                device, num_frames=x.shape[2],
+                frame_seqlen=x.shape[-2] *
+                x.shape[-1] // (self.patch_size[1] * self.patch_size[2]),
+                num_frame_per_block=self.num_frame_per_block
+            )
+
+        if y is not None:
+            x = [torch.cat([u, v], dim=0) for u, v in zip(x, y)]
+
+        # embeddings
+        x = [self.patch_embedding(u.unsqueeze(0)) for u in x]
+        grid_sizes = torch.stack(
+            [torch.tensor(u.shape[2:], dtype=torch.long) for u in x])
+        x = [u.flatten(2).transpose(1, 2) for u in x]
+        seq_lens = torch.tensor([u.size(1) for u in x], dtype=torch.long)
+        assert seq_lens.max() <= seq_len
+        x = torch.cat([torch.cat([u, u.new_zeros(1, seq_len - u.size(1), u.size(2))], dim=1) for u in x])
+
+        # time embeddings
+        # with amp.autocast(dtype=torch.float32):
+        e = self.time_embedding(sinusoidal_embedding_1d(self.freq_dim, t.flatten()).type_as(x))  # [1, 21] -> [21, 1536]
+        e0 = self.time_projection(e).unflatten(1, (6, self.dim)).unflatten(dim=0, sizes=t.shape)  # [1, 21, 6, 1536]
+        # assert e.dtype == torch.float32 and e0.dtype == torch.float32
+
+        # context
+        context_lens = None
+        context = self.text_embedding(
+            torch.stack([
+                torch.cat(
+                    [u, u.new_zeros(self.text_len - u.size(0), u.size(1))])
+                for u in context
+            ]))
+
+        if clip_fea is not None:
+            context_clip = self.img_emb(clip_fea)  # bs x 257 x dim
+            context = torch.concat([context_clip, context], dim=1)
+
+        # arguments
+        kwargs = dict(
+            e=e0,
+            seq_lens=seq_lens,
+            grid_sizes=grid_sizes,
+            freqs=self.freqs,
+            context=context,
+            context_lens=context_lens,
+            block_mask=self.block_mask)
+
+        def create_custom_forward(module):
+            def custom_forward(*inputs, **kwargs):
+                return module(*inputs, **kwargs)
+            return custom_forward
+
+        for block in self.blocks:
+            if torch.is_grad_enabled() and self.gradient_checkpointing:
+                x = torch.utils.checkpoint.checkpoint(
+                    create_custom_forward(block),
+                    x, **kwargs,
+                    use_reentrant=False,
+                )
+            else:
+                x = block(x, **kwargs)
+
+        # head
+        x = self.head(x, e.unflatten(dim=0, sizes=t.shape).unsqueeze(2))
+
+        # unpatchify
+        x = self.unpatchify(x, grid_sizes)
+        return torch.stack(x)
+
+    def forward(
+        self,
+        *args,
+        **kwargs
+    ):
+        if kwargs.get('kv_cache', None) is not None:
+            return self._forward_inference(*args, **kwargs)
+        else:
+            return self._forward_train(*args, **kwargs)
+
+    def unpatchify(self, x, grid_sizes):
+        r"""
+        Reconstruct video tensors from patch embeddings.
+
+        Args:
+            x (List[Tensor]):
+                List of patchified features, each with shape [L, C_out * prod(patch_size)]
+            grid_sizes (Tensor):
+                Original spatial-temporal grid dimensions before patching,
+                shape [B, 3] (3 dimensions correspond to F_patches, H_patches, W_patches)
+
+        Returns:
+            List[Tensor]:
+                Reconstructed video tensors with shape [C_out, F, H / 8, W / 8]
+        """
+
+        c = self.out_dim
+        out = []
+        for u, v in zip(x, grid_sizes.tolist()):
+            u = u[:math.prod(v)].view(*v, *self.patch_size, c)
+            u = torch.einsum('fhwpqrc->cfphqwr', u)
+            u = u.reshape(c, *[i * j for i, j in zip(v, self.patch_size)])
+            out.append(u)
+        return out
+
+    def init_weights(self):
+        r"""
+        Initialize model parameters using Xavier initialization.
+        """
+
+        # basic init
+        for m in self.modules():
+            if isinstance(m, nn.Linear):
+                nn.init.xavier_uniform_(m.weight)
+                if m.bias is not None:
+                    nn.init.zeros_(m.bias)
+
+        # init embeddings
+        nn.init.xavier_uniform_(self.patch_embedding.weight.flatten(1))
+        for m in self.text_embedding.modules():
+            if isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, std=.02)
+        for m in self.time_embedding.modules():
+            if isinstance(m, nn.Linear):
+                nn.init.normal_(m.weight, std=.02)
+
+        # init output layer
+        nn.init.zeros_(self.head.head.weight)
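
The `unpatchify` method above undoes the patch embedding by permuting each per-patch axis back next to its spatial/temporal axis with `torch.einsum('fhwpqrc->cfphqwr', u)` and then merging the pairs with a reshape. A small standalone check of that rearrangement, using toy shapes and an assumed (1, 2, 2) patch size as in Wan-style DiTs (the sizes are illustrative; the snippet does not touch the model itself):

# Toy check of the unpatchify rearrangement:
# [F, H, W, pf, ph, pw, C] -> [C, F*pf, H*ph, W*pw] via the same einsum + reshape.
import torch

f, h, w = 3, 4, 5       # number of patches along frame/height/width (toy values)
pf, ph, pw = 1, 2, 2    # patch size (assumed (1, 2, 2), as in Wan-style DiTs)
c = 16                  # output channels (toy value)

u = torch.randn(f, h, w, pf, ph, pw, c)
v = torch.einsum('fhwpqrc->cfphqwr', u)          # -> [c, f, pf, h, ph, w, pw]
video = v.reshape(c, f * pf, h * ph, w * pw)     # merge each (patch index, offset) pair
print(video.shape)                               # torch.Size([16, 3, 8, 10])

# Patch (i, j, k) with intra-patch offset (a, b, d) lands at frame i*pf+a, row j*ph+b, col k*pw+d.
assert torch.equal(video[:, 0 * pf + 0, 1 * ph + 1, 2 * pw + 0], u[0, 1, 2, 0, 1, 0, :])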
exp_code/1_benchmark/CausVid/causvid/models/wan/flow_match.py ADDED
@@ -0,0 +1,83 @@
+"""
+The following code is copied from https://github.com/modelscope/DiffSynth-Studio/blob/main/diffsynth/schedulers/flow_match.py
+"""
+import torch
+
+
+class FlowMatchScheduler():
+
+    def __init__(self, num_inference_steps=100, num_train_timesteps=1000, shift=3.0, sigma_max=1.0, sigma_min=0.003 / 1.002, inverse_timesteps=False, extra_one_step=False, reverse_sigmas=False):
+        self.num_train_timesteps = num_train_timesteps
+        self.shift = shift
+        self.sigma_max = sigma_max
+        self.sigma_min = sigma_min
+        self.inverse_timesteps = inverse_timesteps
+        self.extra_one_step = extra_one_step
+        self.reverse_sigmas = reverse_sigmas
+        self.set_timesteps(num_inference_steps)
+
+    def set_timesteps(self, num_inference_steps=100, denoising_strength=1.0, training=False):
+        sigma_start = self.sigma_min + \
+            (self.sigma_max - self.sigma_min) * denoising_strength
+        if self.extra_one_step:
+            self.sigmas = torch.linspace(
+                sigma_start, self.sigma_min, num_inference_steps + 1)[:-1]
+        else:
+            self.sigmas = torch.linspace(
+                sigma_start, self.sigma_min, num_inference_steps)
+        if self.inverse_timesteps:
+            self.sigmas = torch.flip(self.sigmas, dims=[0])
+        self.sigmas = self.shift * self.sigmas / \
+            (1 + (self.shift - 1) * self.sigmas)
+        if self.reverse_sigmas:
+            self.sigmas = 1 - self.sigmas
+        self.timesteps = self.sigmas * self.num_train_timesteps
+        if training:
+            x = self.timesteps
+            y = torch.exp(-2 * ((x - num_inference_steps / 2) /
+                          num_inference_steps) ** 2)
+            y_shifted = y - y.min()
+            bsmntw_weighing = y_shifted * \
+                (num_inference_steps / y_shifted.sum())
+            self.linear_timesteps_weights = bsmntw_weighing
+
+    def step(self, model_output, timestep, sample, to_final=False):
+        self.sigmas = self.sigmas.to(model_output.device)
+        self.timesteps = self.timesteps.to(model_output.device)
+        timestep_id = torch.argmin(
+            (self.timesteps.unsqueeze(0) - timestep.unsqueeze(1)).abs(), dim=1)
+        sigma = self.sigmas[timestep_id].reshape(-1, 1, 1, 1)
+        if to_final or (timestep_id + 1 >= len(self.timesteps)).any():
+            sigma_ = 1 if (
+                self.inverse_timesteps or self.reverse_sigmas) else 0
+        else:
+            sigma_ = self.sigmas[timestep_id + 1].reshape(-1, 1, 1, 1)
+        prev_sample = sample + model_output * (sigma_ - sigma)
+        return prev_sample
+
+    def add_noise(self, original_samples, noise, timestep):
+        """
+        Diffusion forward corruption process.
+        Input:
+            - original_samples: the clean latent with shape [B, C, H, W]
+            - noise: the noise with shape [B, C, H, W]
+            - timestep: the timestep with shape [B]
+        Output: the corrupted latent with shape [B, C, H, W]
+        """
+        self.sigmas = self.sigmas.to(noise.device)
+        self.timesteps = self.timesteps.to(noise.device)
+        timestep_id = torch.argmin(
+            (self.timesteps.unsqueeze(0) - timestep.unsqueeze(1)).abs(), dim=1)
+        sigma = self.sigmas[timestep_id].reshape(-1, 1, 1, 1)
+        sample = (1 - sigma) * original_samples + sigma * noise
+        return sample.type_as(noise)
+
+    def training_target(self, sample, noise, timestep):
+        target = noise - sample
+        return target
+
+    def training_weight(self, timestep):
+        timestep_id = torch.argmin(
+            (self.timesteps - timestep.to(self.timesteps.device)).abs())
+        weights = self.linear_timesteps_weights[timestep_id]
+        return weights
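
Before the generation script below, here is a minimal sanity-check sketch of how `add_noise`, `training_target`, and `step` fit together, constructed with the same shift/sigma settings that `generate_ode_pairs.py` passes to the scheduler; the batch size, latent shape, and chosen timestep indices are illustrative assumptions, not values taken from any config in this commit.

# Minimal sketch (assumes the causvid package from this commit is importable).
import torch
from causvid.models.wan.flow_match import FlowMatchScheduler

scheduler = FlowMatchScheduler(shift=8.0, sigma_min=0.0, extra_one_step=True)
scheduler.set_timesteps(num_inference_steps=50, denoising_strength=1.0)

clean = torch.randn(2, 16, 60, 104)           # hypothetical clean latents [B, C, H, W]
noise = torch.randn_like(clean)
timestep = scheduler.timesteps[[10, 30]]      # two of the 50 discretized timesteps

xt = scheduler.add_noise(clean, noise, timestep)          # (1 - sigma) * clean + sigma * noise
flow = scheduler.training_target(clean, noise, timestep)  # flow-matching target: noise - clean

# Stepping x_t with a perfect flow prediction lands exactly on the next (less noisy) sigma level.
prev = scheduler.step(flow, timestep, xt)
print(xt.shape, flow.shape, prev.shape)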
exp_code/1_benchmark/CausVid/causvid/models/wan/generate_ode_pairs.py ADDED
@@ -0,0 +1,125 @@
+from causvid.models.wan.wan_wrapper import WanDiffusionWrapper, WanTextEncoder, WanVAEWrapper
+from causvid.models.wan.flow_match import FlowMatchScheduler
+from causvid.util import launch_distributed_job
+from causvid.data import TextDataset
+import torch.distributed as dist
+from tqdm import tqdm
+import argparse
+import torch
+import math
+import os
+
+
+def init_model(device):
+    model = WanDiffusionWrapper().to(device).to(torch.float32)
+    encoder = WanTextEncoder().to(device).to(torch.float32)
+    model.set_module_grad(
+        {
+            "model": False
+        }
+    )
+
+    scheduler = FlowMatchScheduler(
+        shift=8.0, sigma_min=0.0, extra_one_step=True)
+    scheduler.set_timesteps(num_inference_steps=50, denoising_strength=1.0)
+    scheduler.sigmas = scheduler.sigmas.to(device)
+
+    # Default Wan negative prompt (in Chinese). It lists artifacts to avoid, e.g. oversaturated
+    # colors, overexposure, static/blurry details, subtitles, painting-like style, grayish tone,
+    # worst/low quality, JPEG artifacts, deformed faces and limbs, fused or extra fingers,
+    # cluttered backgrounds, and walking backwards.
+    sample_neg_prompt = '色调艳丽,过曝,静态,细节模糊不清,字幕,风格,作品,画作,画面,静止,整体发灰,最差质量,低质量,JPEG压缩残留,丑陋的,残缺的,多余的手指,画得不好的手部,画得不好的脸部,畸形的,毁容的,形态畸形的肢体,手指融合,静止不动的画面,杂乱的背景,三条腿,背景人很多,倒着走'
+
+    unconditional_dict = encoder(
+        text_prompts=[sample_neg_prompt]
+    )
+
+    return model, encoder, scheduler, unconditional_dict
+
+
+def main():
+    parser = argparse.ArgumentParser()
+    parser.add_argument("--local_rank", type=int, default=-1)
+    parser.add_argument("--output_folder", type=str)
+    parser.add_argument("--caption_path", type=str)
+    parser.add_argument("--guidance_scale", type=float, default=6.0)
+
+    args = parser.parse_args()
+
+    launch_distributed_job()
+    global_rank = dist.get_rank()
+
+    device = torch.cuda.current_device()
+
+    torch.set_grad_enabled(False)
+    torch.backends.cuda.matmul.allow_tf32 = True
+    torch.backends.cudnn.allow_tf32 = True
+
+    model, encoder, scheduler, unconditional_dict = init_model(device=device)
+
+    dataset = TextDataset(args.caption_path)
+
+    if global_rank == 0:
+        os.makedirs(args.output_folder, exist_ok=True)
+
+    for index in tqdm(range(int(math.ceil(len(dataset) / dist.get_world_size()))), disable=dist.get_rank() != 0):
+        prompt_index = index * dist.get_world_size() + dist.get_rank()
+        if prompt_index >= len(dataset):
+            continue
+        prompt = dataset[prompt_index]
+
+        conditional_dict = encoder(
+            text_prompts=prompt
+        )
+
+        latents = torch.randn(
+            [1, 21, 16, 60, 104], dtype=torch.float32, device=device
+        )
+
+        noisy_input = []
+
+        for progress_id, t in enumerate(tqdm(scheduler.timesteps)):
+            timestep = t * torch.ones([1, 21], device=device, dtype=torch.float32)
+
+            noisy_input.append(latents)
+
+            x0_pred_cond = model(
+                latents, conditional_dict, timestep
+            )
+
+            x0_pred_uncond = model(
+                latents, unconditional_dict, timestep
+            )
+
+            x0_pred = x0_pred_uncond + args.guidance_scale * (
+                x0_pred_cond - x0_pred_uncond
+            )
+
+            flow_pred = model._convert_x0_to_flow_pred(
+                scheduler=scheduler,
+                x0_pred=x0_pred.flatten(0, 1),
+                xt=latents.flatten(0, 1),
+                timestep=timestep.flatten(0, 1)
+            ).unflatten(0, x0_pred.shape[:2])
+
+            latents = scheduler.step(
+                flow_pred.flatten(0, 1),
+                scheduler.timesteps[progress_id] * torch.ones(
+                    [1, 21], device=device, dtype=torch.long).flatten(0, 1),
+                latents.flatten(0, 1)
+            ).unflatten(dim=0, sizes=flow_pred.shape[:2])
+
+        noisy_input.append(latents)
+
+        noisy_inputs = torch.stack(noisy_input, dim=1)
+
+        noisy_inputs = noisy_inputs[:, [0, 36, 44, -1]]
+
+        stored_data = noisy_inputs
+
+        torch.save(
+            {prompt: stored_data.cpu().detach()},
+            os.path.join(args.output_folder, f"{prompt_index:05d}.pt")
+        )
+
+    dist.barrier()
+
+
+if __name__ == "__main__":
+    main()
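
Each rank writes one `.pt` file per prompt containing a single `{prompt: latents}` entry, where the latents are the sampled trajectory kept only at steps `[0, 36, 44, -1]` (shape `[1, 4, 21, 16, 60, 104]` for the settings above). A minimal sketch of reading those ODE pairs back; the folder name is a placeholder for whatever `--output_folder` was used:

# Sketch: iterate over the ODE-pair files written by generate_ode_pairs.py.
import os
import torch

folder = "ode_pairs"  # hypothetical path; use the --output_folder passed above
for name in sorted(os.listdir(folder)):
    if not name.endswith(".pt"):
        continue
    record = torch.load(os.path.join(folder, name), map_location="cpu")
    (prompt, latents), = record.items()   # single {prompt: tensor} entry per file
    print(f"{name}: {prompt[:40]!r} -> {tuple(latents.shape)}")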
exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/README.md ADDED
@@ -0,0 +1,2 @@
+Code in this folder is modified from https://github.com/Wan-Video/Wan2.1
+Licensed under the Apache-2.0 License.
exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__init__.py ADDED
@@ -0,0 +1,3 @@
+from . import configs, distributed, modules
+from .image2video import WanI2V
+from .text2video import WanT2V
exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__pycache__/__init__.cpython-312.pyc ADDED
Binary file (359 Bytes)

exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__pycache__/image2video.cpython-312.pyc ADDED
Binary file (16.7 kB)

exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/__pycache__/text2video.cpython-312.pyc ADDED
Binary file (12.3 kB)

exp_code/1_benchmark/CausVid/causvid/models/wan/wan_base/configs/__init__.py ADDED
@@ -0,0 +1,42 @@
+# Copyright 2024-2025 The Alibaba Wan Team Authors. All rights reserved.
+from .wan_t2v_14B import t2v_14B
+from .wan_t2v_1_3B import t2v_1_3B
+from .wan_i2v_14B import i2v_14B
+import copy
+import os
+
+os.environ['TOKENIZERS_PARALLELISM'] = 'false'
+
+
+# the config of t2i_14B is the same as t2v_14B
+t2i_14B = copy.deepcopy(t2v_14B)
+t2i_14B.__name__ = 'Config: Wan T2I 14B'
+
+WAN_CONFIGS = {
+    't2v-14B': t2v_14B,
+    't2v-1.3B': t2v_1_3B,
+    'i2v-14B': i2v_14B,
+    't2i-14B': t2i_14B,
+}
+
+SIZE_CONFIGS = {
+    '720*1280': (720, 1280),
+    '1280*720': (1280, 720),
+    '480*832': (480, 832),
+    '832*480': (832, 480),
+    '1024*1024': (1024, 1024),
+}
+
+MAX_AREA_CONFIGS = {
+    '720*1280': 720 * 1280,
+    '1280*720': 1280 * 720,
+    '480*832': 480 * 832,
+    '832*480': 832 * 480,
+}
+
+SUPPORTED_SIZES = {
+    't2v-14B': ('720*1280', '1280*720', '480*832', '832*480'),
+    't2v-1.3B': ('480*832', '832*480'),
+    'i2v-14B': ('720*1280', '1280*720', '480*832', '832*480'),
+    't2i-14B': tuple(SIZE_CONFIGS.keys()),
+}
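
These tables are keyed by the task name and by a size string like '832*480'. A brief sketch of how a caller might resolve a task and validate a requested size against them; the chosen task/size values are illustrative, and only `WAN_CONFIGS`, `SIZE_CONFIGS`, `MAX_AREA_CONFIGS`, and `SUPPORTED_SIZES` come from the file above:

# Sketch: resolve a Wan config and validate a size string against the tables above
# (assumes the package layout from this commit, so the configs module imports as below).
from causvid.models.wan.wan_base.configs import (
    MAX_AREA_CONFIGS, SIZE_CONFIGS, SUPPORTED_SIZES, WAN_CONFIGS)

task, size = 't2v-1.3B', '832*480'   # illustrative choices

if size not in SUPPORTED_SIZES[task]:
    raise ValueError(
        f"{size} is unsupported for {task}; choose from {SUPPORTED_SIZES[task]}")

cfg = WAN_CONFIGS[task]           # config object for the chosen task
dims = SIZE_CONFIGS[size]         # (832, 480)
assert dims[0] * dims[1] == MAX_AREA_CONFIGS[size]   # area table matches the size table
print(task, dims)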