SuperCS committed on
Commit 803b6ee · verified · 1 Parent(s): 751adfb

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/memory.md +581 -0
  2. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/mps.md +82 -0
  3. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/neuron.md +59 -0
  4. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/onnx.md +82 -0
  5. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/open_vino.md +77 -0
  6. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/para_attn.md +497 -0
  7. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/pruna.md +184 -0
  8. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/speed-memory-optims.md +200 -0
  9. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/tgate.md +182 -0
  10. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/tome.md +90 -0
  11. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/xdit.md +119 -0
  12. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/xformers.md +32 -0
  13. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/adapt_a_model.md +47 -0
  14. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/controlnet.md +366 -0
  15. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/distributed_inference.md +239 -0
  16. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/dreambooth.md +643 -0
  17. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/instructpix2pix.md +255 -0
  18. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/kandinsky.md +328 -0
  19. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/lora.md +231 -0
  20. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/overview.md +60 -0
  21. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/text2image.md +275 -0
  22. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/text_inversion.md +296 -0
  23. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/wuerstchen.md +191 -0
  24. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/using-diffusers/consisid.md +100 -0
  25. exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/using-diffusers/schedulers.md +256 -0
  26. exp_code/1_benchmark/diffusers-WanS2V/examples/README.md +70 -0
  27. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/README.md +466 -0
  28. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/README_flux.md +381 -0
  29. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/requirements.txt +8 -0
  30. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/requirements_flux.txt +8 -0
  31. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/test_dreambooth_lora_flux_advanced.py +328 -0
  32. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py +0 -0
  33. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py +2081 -0
  34. exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py +0 -0
  35. exp_code/1_benchmark/diffusers-WanS2V/examples/amused/README.md +326 -0
  36. exp_code/1_benchmark/diffusers-WanS2V/examples/amused/train_amused.py +975 -0
  37. exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/README.md +238 -0
  38. exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/requirements.txt +10 -0
  39. exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/train_cogvideox_image_to_video_lora.py +1619 -0
  40. exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/train_cogvideox_lora.py +1607 -0
  41. exp_code/1_benchmark/diffusers-WanS2V/examples/cogview4-control/README.md +201 -0
  42. exp_code/1_benchmark/diffusers-WanS2V/examples/cogview4-control/requirements.txt +6 -0
  43. exp_code/1_benchmark/diffusers-WanS2V/examples/cogview4-control/train_control_cogview4.py +1243 -0
  44. exp_code/1_benchmark/diffusers-WanS2V/examples/community/README.md +0 -0
  45. exp_code/1_benchmark/diffusers-WanS2V/examples/community/README_community_scripts.md +439 -0
  46. exp_code/1_benchmark/diffusers-WanS2V/examples/community/adaptive_mask_inpainting.py +1469 -0
  47. exp_code/1_benchmark/diffusers-WanS2V/examples/community/bit_diffusion.py +264 -0
  48. exp_code/1_benchmark/diffusers-WanS2V/examples/community/checkpoint_merger.py +288 -0
  49. exp_code/1_benchmark/diffusers-WanS2V/examples/community/clip_guided_images_mixing_stable_diffusion.py +445 -0
  50. exp_code/1_benchmark/diffusers-WanS2V/examples/community/clip_guided_stable_diffusion.py +337 -0
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/memory.md ADDED
@@ -0,0 +1,581 @@
1
+ <!--版权所有 2025 HuggingFace 团队。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版本(“许可证”)授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,根据许可证分发的软件按“原样”分发,无任何明示或暗示的担保或条件。有关许可证的特定语言管理权限和限制,请参阅许可证。
8
+ -->
9
+
10
+ # 减少内存使用
11
+
12
+ 现代扩散模型,如 [Flux](../api/pipelines/flux) 和 [Wan](../api/pipelines/wan),拥有数十亿参数,在您的硬件上进行推理时会占用大量内存。这是一个挑战,因为常见的 GPU 通常没有足够的内存。为了克服内存限制,您可以使用多个 GPU(如果可用)、将一些管道组件卸载到 CPU 等。
13
+
14
+ 本指南将展示如何减少内存使用。
15
+
16
+ > [!TIP]
17
+ > 请记住,这些技术可能需要根据模型进行调整。例如,基于 transformer 的扩散模型可能不会像基于 UNet 的模型那样从这些内存优化中同等受益。
18
+
19
+ ## 多个 GPU
20
+
21
+ 如果您可以使用多个 GPU,有几种选项可以在硬件上高效地加载和分发大型模型。这些功能由 [Accelerate](https://huggingface.co/docs/accelerate/index) 库支持,因此请确保先安装它。
22
+
23
+ ```bash
24
+ pip install -U accelerate
25
+ ```
26
+
27
+ ### 分片检查点
28
+
29
+ 以多个分片的形式加载大型检查点很有用,因为分片会逐个加载。这样可以保持较低的内存使用,只需要足够容纳模型大小和最大分片大小的内存。我们建议当 fp32 检查点大于 5GB 时进行分片。默认分片大小为 5GB。
30
+
31
+ 在 [`~DiffusionPipeline.save_pretrained`] 中使用 `max_shard_size` 参数对检查点进行分片。
32
+
33
+ ```py
34
+ from diffusers import AutoModel
35
+
36
+ unet = AutoModel.from_pretrained(
37
+ "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet"
38
+ )
39
+ unet.save_pretrained("sdxl-unet-sharded", max_shard_size="5GB")
40
+ ```
41
+
42
+ 现在您可以使用分片检查点,而不是常规检查点,以节省内存。
43
+
44
+ ```py
45
+ import torch
46
+ from diffusers import AutoModel, StableDiffusionXLPipeline
47
+
48
+ unet = AutoModel.from_pretrained(
49
+ "username/sdxl-unet-sharded", torch_dtype=torch.float16
50
+ )
51
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
52
+ "stabilityai/stable-diffusion-xl-base-1.0",
53
+ unet=unet,
54
+ torch_dtype=torch.float16
55
+ ).to("cuda")
56
+ ```
57
+
58
+ ### 设备放置
59
+
60
+ > [!WARNING]
61
+ > 设备放置是一个实验性功能,API 可能会更改。目前仅支持 `balanced` 策略。我们计划在未来支持额外的映射策略。
62
+
63
+ `device_map` 参数控制管道中的组件或单个模型中的层如何分布在多个设备上。
65
+
66
+ <hfoptions id="device-map">
67
+ <hfoption id="pipeline level">
68
+
69
+ `balanced` 设备放置策略将管道均匀分割到所有可用设备上。
70
+
71
+ ```py
72
+ import torch
73
+ from diffusers import AutoModel, StableDiffusionXLPipeline
74
+
75
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
76
+ "stabilityai/stable-diffusion-xl-base-1.0",
77
+ torch_dtype=torch.float16,
78
+ device_map="balanced"
79
+ )
80
+ ```
81
+
82
+ 您可以使用 `hf_device_map` 检查管道的设备映射。
83
+
84
+ ```py
85
+ print(pipeline.hf_device_map)
86
+ {'unet': 1, 'vae': 1, 'safety_checker': 0, 'text_encoder': 0}
87
+ ```
88
+
89
+ </hfoption>
90
+ <hfoption id="model level">
91
+
92
+ `device_map` 对于加载大型模型非常有用,例如具有 125 亿参数的 Flux diffusion transformer。将其设置为 `"auto"` 可以自动将模型首先分布到最快的设备上,然后再移动到较慢的设备。有关更多详细信息,请参阅 [模型分片](../training/distributed_inference#model-sharding) 文档。
93
+
94
+ ```py
95
+ import torch
96
+ from diffusers import AutoModel
97
+
98
+ transformer = AutoModel.from_pretrained(
99
+ "black-forest-labs/FLUX.1-dev",
100
+ subfolder="transformer",
101
+ device_map="auto",
102
+ torch_dtype=torch.bfloat16
103
+ )
104
+ ```
105
+
106
+ 您可以使用 `hf_device_map` 检查模型的设备映射。
107
+
108
+ ```py
109
+ print(transformer.hf_device_map)
110
+ ```
111
+
112
+ </hfoption>
113
+ </hfoptions>
114
+
115
+ 当设计您自己的 `device_map` 时,它应该是一个字典,包含模型的特定模块名称或层以及设备标识符(整数表示 GPU,`cpu` 表示 CPU,`disk` 表示磁盘)。
116
+
117
+ 在模型上调用 `hf_device_map` 以查看模型层如何分布,然后设计您自己的映射。
118
+
119
+ ```py
120
+ print(transformer.hf_device_map)
121
+ {'pos_embed': 0, 'time_text_embed': 0, 'context_embedder': 0, 'x_embedder': 0, 'transformer_blocks': 0, 'single_transformer_blocks.0': 0, 'single_transformer_blocks.1': 0, 'single_transformer_blocks.2': 0, 'single_transformer_blocks.3': 0, 'single_transformer_blocks.4': 0, 'single_transformer_blocks.5': 0, 'single_transformer_blocks.6': 0, 'single_transformer_blocks.7': 0, 'single_transformer_blocks.8': 0, 'single_transformer_blocks.9': 0, 'single_transformer_blocks.10': 'cpu', 'single_transformer_blocks.11': 'cpu', 'single_transformer_blocks.12': 'cpu', 'single_transformer_blocks.13': 'cpu', 'single_transformer_blocks.14': 'cpu', 'single_transformer_blocks.15': 'cpu', 'single_transformer_blocks.16': 'cpu', 'single_transformer_blocks.17': 'cpu', 'single_transformer_blocks.18': 'cpu', 'single_transformer_blocks.19': 'cpu', 'single_transformer_blocks.20': 'cpu', 'single_transformer_blocks.21': 'cpu', 'single_transformer_blocks.22': 'cpu', 'single_transformer_blocks.23': 'cpu', 'single_transformer_blocks.24': 'cpu', 'single_transformer_blocks.25': 'cpu', 'single_transformer_blocks.26': 'cpu', 'single_transformer_blocks.27': 'cpu', 'single_transformer_blocks.28': 'cpu', 'single_transformer_blocks.29': 'cpu', 'single_transformer_blocks.30': 'cpu', 'single_transformer_blocks.31': 'cpu', 'single_transformer_blocks.32': 'cpu', 'single_transformer_blocks.33': 'cpu', 'single_transformer_blocks.34': 'cpu', 'single_transformer_blocks.35': 'cpu', 'single_transformer_blocks.36': 'cpu', 'single_transformer_blocks.37': 'cpu', 'norm_out': 'cpu', 'proj_out': 'cpu'}
122
+ ```
123
+
124
+ 例如,下面的 `device_map` 将 `single_transformer_blocks.10` 到 `single_transformer_blocks.20` 放置在第二个 GPU(`1`)上。
125
+
126
+ ```py
127
+ import torch
128
+ from diffusers import AutoModel
129
+
130
+ device_map = {
131
+ 'pos_embed': 0, 'time_text_embed': 0, 'context_embedder': 0, 'x_embedder': 0, 'transformer_blocks': 0, 'single_transformer_blocks.0': 0, 'single_transformer_blocks.1': 0, 'single_transformer_blocks.2': 0, 'single_transformer_blocks.3': 0, 'single_transformer_blocks.4': 0, 'single_transformer_blocks.5': 0, 'single_transformer_blocks.6': 0, 'single_transformer_blocks.7': 0, 'single_transformer_blocks.8': 0, 'single_transformer_blocks.9': 0, 'single_transformer_blocks.10': 1, 'single_transformer_blocks.11': 1, 'single_transformer_blocks.12': 1, 'single_transformer_blocks.13': 1, 'single_transformer_blocks.14': 1, 'single_transformer_blocks.15': 1, 'single_transformer_blocks.16': 1, 'single_transformer_blocks.17': 1, 'single_transformer_blocks.18': 1, 'single_transformer_blocks.19': 1, 'single_transformer_blocks.20': 1, 'single_transformer_blocks.21': 'cpu', 'single_transformer_blocks.22': 'cpu', 'single_transformer_blocks.23': 'cpu', 'single_transformer_blocks.24': 'cpu', 'single_transformer_blocks.25': 'cpu', 'single_transformer_blocks.26': 'cpu', 'single_transformer_blocks.27': 'cpu', 'single_transformer_blocks.28': 'cpu', 'single_transformer_blocks.29': 'cpu', 'single_transformer_blocks.30': 'cpu', 'single_transformer_blocks.31': 'cpu', 'single_transformer_blocks.32': 'cpu', 'single_transformer_blocks.33': 'cpu', 'single_transformer_blocks.34': 'cpu', 'single_transformer_blocks.35': 'cpu', 'single_transformer_blocks.36': 'cpu', 'single_transformer_blocks.37': 'cpu', 'norm_out': 'cpu', 'proj_out': 'cpu'
132
+ }
133
+
134
+ transformer = AutoModel.from_pretrained(
135
+ "black-forest-labs/FLUX.1-dev",
136
+ subfolder="transformer",
137
+ device_map=device_map,
138
+ torch_dtype=torch.bfloat16
139
+ )
140
+ ```
141
+
142
+ 传递一个字典,将最大内存使用量映射到每个设备以强制执行限制。如果设备不在 `max_memory` 中,它将被忽略,管道组件不会分发到该设备。
143
+
144
+ ```py
145
+ import torch
146
+ from diffusers import AutoModel, StableDiffusionXLPipeline
147
+
148
+ max_memory = {0:"1GB", 1:"1GB"}
149
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
150
+ "stabilityai/stable-diffusion-xl-base-1.0",
151
+ torch_dtype=torch.float16,
152
+ device_map="balanced",
153
+ max_memory=max_memory
154
+ )
155
+ ```
156
+
157
+ Diffusers 默认使用所有设备的最大内存,但如果它们无法适应 GPU,则需要使用单个 GPU 并通过以下方法卸载到 CPU。
158
+
159
+ - [`~DiffusionPipeline.enable_model_cpu_offload`] 仅适用于单个 GPU,但非常大的模型可能无法适应它
160
+ - 使用 [`~DiffusionPipeline.enable_sequential_cpu_offload`] 可能有效,但它极其缓慢,并且仅限于单个 GPU。
161
+
162
+ 使用 [`~DiffusionPipeline.reset_device_map`] 方法来重置 `device_map`。如果您想在已进行设备映射的管道上使用方法如 `.to()`、[`~DiffusionPipeline.enable_sequential_cpu_offload`] 和 [`~DiffusionPipeline.enable_model_cpu_offload`],这是必要的。
163
+
164
+ ```py
165
+ pipeline.reset_device_map()
166
+ ```
167
+
168
+ ## VAE 切片
169
+
170
+ VAE 切片通过将大批次输入拆分为单个数据批次并分别处理它们来节省内存。这种方法在同时生成多个图像时效果最佳。
171
+
172
+ 例如,如果您同时生成 4 个图像,解码会将峰值激活内存增加 4 倍。VAE 切片通过一次只解码 1 个图像而不是所有 4 个图像来减少这种情况。
173
+
174
+ 调用 [`~StableDiffusionPipeline.enable_vae_slicing`] 来启用切片 VAE。您可以预期在解码多图像批次时性能会有小幅提升,而在单图像批次时没有性能影响。
175
+
176
+ ```py
177
+ import torch
178
+ from diffusers import AutoModel, StableDiffusionXLPipeline
179
+
180
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
181
+ "stabilityai/stable-diffusion-xl-base-1.0",
182
+ torch_dtype=torch.float16,
183
+ ).to("cuda")
184
+ pipeline.enable_vae_slicing()
185
+ pipeline(["An astronaut riding a horse on Mars"]*32).images[0]
186
+ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
187
+ ```
188
+
189
+ > [!WARNING]
190
+ > [`AutoencoderKLWan`] 和 [`AsymmetricAutoencoderKL`] 类不支持切片。
191
+
192
+ ## VAE 平铺
193
+
194
+ VAE 平铺通过将图像划分为较小的重叠图块而不是一次性处理整个图像来节省内存。这也减少了峰值内存使用量,因为 GPU 一次只处理一个图块。
195
+
196
+ 调用 [`~StableDiffusionPipeline.enable_vae_tiling`] 来启用 VAE 平铺。生成的图像可能因图块到图块的色调变化而有所不同,因为它们被单独解码,但图块之间不应有明显的接缝。对于低于预设(但可配置)限制的分辨率,平铺被禁用。例如,对于 [`StableDiffusionPipeline`] 中的 VAE,此限制为 512x512。
197
+
198
+ ```py
199
+ import torch
200
+ from diffusers import AutoPipelineForImage2Image
201
+ from diffusers.utils import load_image
202
+
203
+ pipeline = AutoPipelineForImage2Image.from_pretrained(
204
+ "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
205
+ ).to("cuda")
206
+ pipeline.enable_vae_tiling()
207
+
208
+ init_image = load_image("https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png")
209
+ prompt = "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k"
210
+ pipeline(prompt, image=init_image, strength=0.5).images[0]
211
+ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
212
+ ```
213
+
214
+ > [!WARNING]
215
+ > [`AutoencoderKLWan`] 和 [`AsymmetricAutoencoderKL`] 不支持平铺。
216
+
217
+ ## 卸载
218
+
219
+ 卸载策略将当前不活动的层或模型移动到 CPU,以避免增加 GPU 内存。这些策略可以与量化和 torch.compile 结合使用,以平衡推理速度和内存使用。
221
+
222
+ 有关更多详细信息,请参考 [编译和卸载量化模型](./speed-memory-optims) 指南。
223
+
224
+ ### CPU 卸载
225
+
226
+ CPU 卸载选择性地将权重从 GPU 移动到 CPU。当需要某个组件时,它被传输到 GPU;当不需要时,它被移动到 CPU。此方法作用于子模块而非整个模型。它通过避免将整个模型存储在 GPU 上来节省内存。
227
+
228
+ CPU 卸载显著减少内存使用,但由于子模块在设备之间多次来回传递,它也非常慢。由于速度极慢,它通常不实用。
229
+
230
+ > [!WARNING]
231
+ > 在调用 [`~DiffusionPipeline.enable_sequential_cpu_offload`] 之前,不要将管道移动到 CUDA,否则节省的内存非常有限(更多细节请参考此 [issue](https://github.com/huggingface/diffusers/issues/1934))。这是一个状态操作,会在模型上安装钩子。
232
+
233
+ 调用 [`~DiffusionPipeline.enable_sequential_cpu_offload`] 以在管道上启用它。
234
+
235
+ ```py
236
+ import torch
237
+ from diffusers import DiffusionPipeline
238
+
239
+ pipeline = DiffusionPipeline.from_pretrained(
240
+ "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
241
+ )
242
+ pipeline.enable_sequential_cpu_offload()
243
+
244
+ pipeline(
245
+ prompt="An astronaut riding a horse on Mars",
246
+ guidance_scale=0.,
247
+ height=768,
248
+ width=1360,
249
+ num_inference_steps=4,
250
+ max_sequence_length=256,
251
+ ).images[0]
252
+ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
253
+ ```
254
+
255
+ ### 模型卸载
256
+
257
+ 模型卸载将整个模型移动到 GPU,而不是选择性地移动某些层或模型组件。一个主要管道模型,通常是文本编码器、UNet 和 VAE,被放置在 GPU 上,而其他组件保持在 CPU 上。像 UNet 这样运行多次的组件会一直留在 GPU 上,直到完全完成且不再需要。这消除了 [CPU 卸载](#cpu-offloading) 的通信开销,使模型卸载成为一个更快的替代方案。权衡是内存节省不会那么大。
258
+
259
+ > [!WARNING]
260
+ > 请注意,如果在安装钩子后模型在管道外部被重用(更多细节请参考 [移除钩子](https://huggingface.co/docs/accelerate/en/package_reference/big_modeling#accelerate.hooks.remove_hook_from_module)),您需要按预期顺序运行整个管道和模型以正确卸载它们。这是一个状态操作,会在模型上安装钩子。
261
+
262
+ 调用 [`~DiffusionPipeline.enable_model_cpu_offload`] 以在管道上启用它。
263
+
264
+ ```py
265
+ import torch
266
+ from diffusers import DiffusionPipeline
267
+
268
+ pipeline = DiffusionPipeline.from_pretrained(
269
+ "black-forest-labs/FLUX.1-schnell", torch_dtype=torch.bfloat16
270
+ )
271
+ pipeline.enable_model_cpu_offload()
272
+
273
+ pipeline(
274
+ prompt="An astronaut riding a horse on Mars",
275
+ guidance_scale=0.,
276
+ height=768,
277
+ width=1360,
278
+ num_inference_steps=4,
279
+ max_sequence_length=256,
280
+ ).images[0]
281
+ print(f"最大内存保留: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
282
+ ```
283
+
284
+ [`~DiffusionPipeline.enable_model_cpu_offload`] 在您单独使用 [`~StableDiffusionXLPipeline.encode_prompt`] 方法生成文本编码器隐藏状态时也有帮助。
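+
+ 下面是一个示意性草图(假设使用 [`StableDiffusionXLPipeline`];返回值的个数与顺序以所用版本的 `encode_prompt` 签名为准),展示在启用模型卸载后单独调用 `encode_prompt`:
+
+ ```py
+ import torch
+ from diffusers import StableDiffusionXLPipeline
+
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16
+ )
+ pipeline.enable_model_cpu_offload()
+
+ # 仅运行文本编码器;模型卸载会在调用时把文本编码器移到 GPU,用完后移回 CPU
+ (
+     prompt_embeds,
+     negative_prompt_embeds,
+     pooled_prompt_embeds,
+     negative_pooled_prompt_embeds,
+ ) = pipeline.encode_prompt(prompt="An astronaut riding a horse on Mars")
+ ```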
285
+
286
+ ### 组卸载
287
+
288
+ 组卸载将内部层组([torch.nn.ModuleList](https://pytorch.org/docs/stable/generated/torch.nn.ModuleList.html) 或 [torch.nn.Sequential](https://pytorch.org/docs/stable/generated/torch.nn.Sequential.html))移动到 CPU。它比[模型卸载](#model-offloading)使用更少的内存,并且比[CPU 卸载](#cpu-offloading)更快,因为它减少了通信开销。
289
+
290
+ > [!WARNING]
291
+ > 如果前向实现包含权重相关的输入设备转换,组卸载可能不适用于所有模型,因为它可能与组卸载的设备转换机制冲突。
292
+
293
+ 调用 [`~ModelMixin.enable_group_offload`] 为继承自 [`ModelMixin`] 的标准 Diffusers 模型组件启用它。对于不继承自 [`ModelMixin`] 的其他模型组件,例如通用 [torch.nn.Module](https://pytorch.org/docs/stable/generated/torch.nn.Module.html),使用 [`~hooks.apply_group_offloading`] 代替。
294
+
295
+ `offload_type` 参数可以设置为 `block_level` 或 `leaf_level`。
296
+
297
+ - `block_level` 基于 `num_blocks_per_group` 参数按组卸载层。例如,在一个有 40 层的模型上设置 `num_blocks_per_group=2`,则每次加载和卸载 2 层(总共 20 次加载/卸载)。这大大减少了内存需求。
+ - `leaf_level` 在最低级别卸载单个层,等同于 [CPU 卸载](#cpu-offloading)。但如果使用流,它可以在不牺牲推理速度的情况下变得更快。
299
+
300
+ ```py
301
+ import torch
302
+ from diffusers import CogVideoXPipeline
303
+ from diffusers.hooks import apply_group_offloading
304
+ from diffusers.utils import export_to_video
305
+
306
+ onload_device = torch.device("cuda")
307
+ offload_device = torch.device("cpu")
308
+ pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16)
309
+
310
+ # 对 Diffusers 模型实现使用 enable_group_offload 方法
311
+ pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level")
312
+ pipeline.vae.enable_group_offload(onload_device=onload_device, offload_type="leaf_level")
313
+
314
+ # 对其他模型组件使用 apply_group_offloading 方法
315
+ apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=2)
316
+
317
+ prompt = (
318
+ "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
319
+ "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
320
+ "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
321
+ "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
322
+ "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
323
+ "atmosphere of this unique musical performance."
324
+ )
325
+ video = pipeline(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
326
+ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
327
+ export_to_video(video, "output.mp4", fps=8)
328
+ ```
329
+
330
+ #### CUDA 流
331
+ 对于支持异步数据传输流的 CUDA 设备,可以启用 `use_stream` 参数,与 [CPU 卸载](#cpu-offloading) 相比可减少整体执行时间。它通过层预取使数据传输与计算重叠:当前层仍在执行时,就将下一个要执行的层加载到 GPU 上。这会显著增加 CPU 内存使用,因此请确保 CPU 内存达到模型大小的 2 倍。
332
+
333
+ 设置 `record_stream=True` 以获得更多速度提升,代价是内存使用量略有增加。请参阅 [torch.Tensor.record_stream](https://pytorch.org/docs/stable/generated/torch.Tensor.record_stream.html) 文档了解更多信息。
334
+
335
+ > [!TIP]
336
+ > 当 `use_stream=True` 在启用平铺的 VAEs 上时,确保在推理前进行虚拟前向传递(可以使用虚拟输入),以避免设备不匹配错误。这可能不适用于所有实现,因此如果遇到任何问题,请随时提出问题。
337
+
338
+ 如果您在使用启用 `use_stream` 的 `block_level` 组卸载,`num_blocks_per_group` 参数应设置为 `1`,否则会引发警告。
339
+
340
+ ```py
341
+ pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", use_stream=True, record_stream=True)
342
+ ```
343
+
344
+ `low_cpu_mem_usage` 参数可以设置为 `True`,以在使用流进行组卸载时减少 CPU 内存使用。它最适合 `leaf_level` 卸载和 CPU 内存瓶颈的情况。通过动态创建固定张量而不是预先固定它们来节省内存。然而,这可能会增加整体执行时间。
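+
+ 下面是一个最小示意(假设沿用上文创建的 `pipeline`、`onload_device` 和 `offload_device`),展示在使用流的 `leaf_level` 卸载中同时开启 `low_cpu_mem_usage`:
+
+ ```py
+ # 最小示意:在使用 CUDA 流的 leaf_level 组卸载中开启 low_cpu_mem_usage
+ pipeline.transformer.enable_group_offload(
+     onload_device=onload_device,
+     offload_device=offload_device,
+     offload_type="leaf_level",
+     use_stream=True,
+     low_cpu_mem_usage=True,  # 动态创建固定张量,降低 CPU 内存峰值,但可能增加整体执行时间
+ )
+ ```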
345
+
346
+ #### 卸载到磁盘
347
+ 组卸载可能会消耗大量系统内存,具体取决于模型大小。在内存有限的系统上,尝试将组卸载到磁盘作为辅助内存。
348
+
349
+ 在 [`~ModelMixin.enable_group_offload`] 或 [`~hooks.apply_group_offloading`] 中设置 `offload_to_disk_path` 参数,将模型卸载到磁盘。
350
+
351
+ ```py
352
+ pipeline.transformer.enable_group_offload(onload_device=onload_device, offload_device=offload_device, offload_type="leaf_level", offload_to_disk_path="path/to/disk")
353
+
354
+ apply_group_offloading(pipeline.text_encoder, onload_device=onload_device, offload_type="block_level", num_blocks_per_group=2, offload_to_disk_path="path/to/disk")
355
+ ```
356
+
357
+ 参考这[两个](https://github.com/huggingface/diffusers/pull/11682#issue-3129365363)[表格](https://github.com/huggingface/diffusers/pull/11682#issuecomment-2955715126)来比较速度和内存的权衡。
358
+
359
+ ## 分层类型转换
360
+
361
+ > [!TIP]
362
+ > 将分层类型转换与[组卸载](#group-offloading)结合使用,以获得更多内存节省。
363
+
364
+ 分层类型转换将权重存储在较小的数据格式中(例如 `torch.float8_e4m3fn` 和 `torch.float8_e5m2`),以使用更少的内存,并在计算时将那些权重上转换为更高精度如 `torch.float16` 或 `torch.bfloat16`。某些层(归一化和调制相关权重)被跳过,因为将它们存储在 fp8 中可能会降低生成质量。
365
+
366
+ > [!WARNING]
367
+ > 如果前向实现包含权重的内部类型转换,分层类型转换可能不适用于所有模型。当前的分层类型转换实现假设前向传递独立于权重精度,并且输入数据类型始终在 `compute_dtype` 中指定(请参见[这里](https://github.com/huggingface/transformers/blob/7f5077e53682ca855afc826162b204ebf809f1f9/src/transformers/models/t5/modeling_t5.py#L294-L299)以获取不兼容的实现)。
368
+ >
369
+ > 分层类型转换也可能在使用[PEFT](https://huggingface.co/docs/peft/index)层的自定义建模实现上失败。有一些检查可用,但它们没有经过广泛测试或保证在所有情况下都能工作。
370
+
371
+ 调用 [`~ModelMixin.enable_layerwise_casting`] 来设置存储和计算数据类型。
372
+
373
+ ```py
374
+ import torch
375
+ from diffusers import CogVideoXPipeline, CogVideoXTransformer3DModel
376
+ from diffusers.utils import export_to_video
377
+
378
+ transformer = CogVideoXTransformer3DModel.from_pretrained(
379
+ "THUDM/CogVideoX-5b",
380
+ subfolder="transformer",
381
+ torch_dtype=torch.bfloat16
382
+ )
383
+ transformer.enable_layerwise_casting(storage_dtype=torch.float8_e4m3fn, compute_dtype=torch.bfloat16)
384
+
385
+ pipeline = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b",
386
+ transformer=transformer,
387
+ torch_dtype=torch.bfloat16
388
+ ).to("cuda")
389
+ prompt = (
390
+ "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. "
391
+ "The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
392
+ "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
393
+ "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
394
+ "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
395
+ "atmosphere of this unique musical performance."
396
+ )
397
+ video = pipeline(prompt=prompt, guidance_scale=6, num_inference_steps=50).frames[0]
398
+ print(f"Max memory reserved: {torch.cuda.max_memory_allocated() / 1024**3:.2f} GB")
399
+ export_to_video(video, "output.mp4", fps=8)
400
+ ```
401
+
402
+ [`~hooks.apply_layerwise_casting`] 方法也可以在您需要更多控制和灵活性时使用。它可以通过在特定内部模块上调用它来部分应用于模型层。使用 `skip_modules_pattern` 或 `skip_modules_classes` 参数来指定要避免的模块,例如归一化和调制层。
403
+
404
+ ```python
405
+ import torch
406
+ from diffusers import CogVideoXTransformer3DModel
407
+ from diffusers.hooks import apply_layerwise_casting
408
+
409
+ transformer = CogVideoXTransformer3DModel.from_pretrained(
410
+ "THUDM/CogVideoX-5b",
411
+ subfolder="transformer",
412
+ torch_dtype=torch.bfloat16
413
+ )
414
+
415
+ # 跳过归一化层
416
+ apply_layerwise_casting(
417
+ transformer,
418
+ storage_dtype=torch.float8_e4m3fn,
419
+ compute_dtype=torch.bfloat16,
420
+ skip_modules_classes=["norm"],
421
+ non_blocking=True,
422
+ )
423
+ ```
424
+
425
+ ## torch.channels_last
426
+
427
+ [torch.channels_last](https://pytorch.org/tutorials/intermediate/memory_format_tutorial.html) 将张量的存储方式从 `(批次大小, 通道数, 高度, 宽度)` 翻转为 `(批次大小, 高度, 宽度, 通道数)`。这使张量与硬件如何顺序访问存储在内存中的张量对齐,并避免了在内存中跳转以访问像素值。
428
+
429
+ 并非所有运算符当前都支持通道最后格式,并且可能导致性能更差,但仍然值得尝试。
430
+
431
+ ```py
432
+ print(pipeline.unet.conv_out.state_dict()["weight"].stride()) # (2880, 9, 3, 1)
433
+ pipeline.unet.to(memory_format=torch.channels_last) # 原地操作
434
+ print(
435
+ pipeline.unet.conv_out.state_dict()["weight"].stride()
436
+ ) # (2880, 1, 960, 320) 第二个维度的跨度为1证明它有效
437
+ ```
438
+
439
+ ## torch.jit.trace
440
+
441
+ [torch.jit.trace](https://pytorch.org/docs/stable/generated/torch.jit.trace.html) 记录模型在样本输入上执行的操作,并根据记录的执行路径创建一个新的、优化的模型表示。在跟踪过程中,模型被优化以减少来自Python和动态控制流的开销,并且操作被融合在一起以提高效率。返回的可执行文件或 [ScriptFunction](https://pytorch.org/docs/stable/generated/torch.jit.ScriptFunction.html) 可以被编译。
442
+
443
+ ```py
444
+ import time
445
+ import torch
446
+ from diffusers import StableDiffusionPipeline
447
+ import functools
448
+
449
+ # torch 禁用梯度
450
+ torch.set_grad_enabled(False)
451
+
452
+ # 设置变量
453
+ n_experiments = 2
454
+ unet_runs_per_experiment = 50
455
+
456
+ # 加载样本输入
457
+ def generate_inputs():
458
+ sample = torch.randn((2, 4, 64, 64), device="cuda", dtype=torch.float16)
459
+ timestep = torch.rand(1, device="cuda", dtype=torch.float16) * 999
460
+ encoder_hidden_states = torch.randn((2, 77, 768), device="cuda", dtype=torch.float16)
461
+ return sample, timestep, encoder_hidden_states
462
+
463
+
464
+ pipeline = StableDiffusionPipeline.from_pretrained(
465
+ "stable-diffusion-v1-5/stable-diffusion-v1-5",
466
+ torch_dtype=torch.float16,
467
+ use_safetensors=True,
468
+ ).to("cuda")
469
+ unet = pipeline.unet
470
+ unet.eval()
471
+ unet.to(memory_format=torch.channels_last)  # 使用 channels_last 内存格式
473
+ unet.forward = functools.partial(unet.forward, return_dict=False) # 设置 return_dict=False 为默认
474
+
475
+ # 预热
476
+ for _ in range(3):
477
+ with torch.inference_mode():
478
+ inputs = generate_inputs()
479
+ orig_output = unet(*inputs)
480
+
481
+ # 追踪
482
+ print("tracing..")
483
+ unet_traced = torch.jit.trace(unet, inputs)
484
+ unet_traced.eval()
485
+ print("done tracing")
486
+
487
+ # 预热和优化图
488
+ for _ in range(5):
489
+ with torch.inference_mode():
490
+ inputs = generate_inputs()
491
+ orig_output = unet_traced(*inputs)
492
+
493
+ # 基准测试
494
+ with torch.inference_mode():
495
+ for _ in range(n_experiments):
496
+ torch.cuda.synchronize()
497
+ start_time = time.time()
498
+ for _ in range(unet_runs_per_experiment):
499
+ orig_output = unet_traced(*inputs)
500
+ torch.cuda.synchronize()
501
+ print(f"unet traced inference took {time.time() - start_time:.2f} seconds")
502
+ for _ in range(n_experiments):
503
+ torch.cuda.synchronize()
504
+ start_time = time.time()
505
+ for _ in range(unet_runs_per_experiment):
506
+ orig_output = unet(*inputs)
507
+ torch.cuda.synchronize()
508
+ print(f"unet inference took {time.time() - start_time:.2f} seconds")
509
+
510
+ # 保存模型
511
+ unet_traced.save("unet_traced.pt")
512
+ ```
513
+
514
+ 替换管道的 UNet 为追踪版本。
515
+
516
+ ```py
517
+ import torch
518
+ from diffusers import StableDiffusionPipeline
519
+ from dataclasses import dataclass
520
+
521
+ @dataclass
522
+ class UNet2DConditionOutput:
523
+ sample: torch.Tensor
524
+
525
+ pipeline = StableDiffusionPipeline.from_pretrained(
526
+ "stable-diffusion-v1-5/stable-diffusion-v1-5",
527
+ torch_dtype=torch.float16,
528
+ use_safetensors=True,
529
+ ).to("cuda")
530
+
531
+ # 使用 jitted unet
532
+ unet_traced = torch.jit.load("unet_traced.pt")
533
+
534
+ # del pipeline.unet
535
+ class TracedUNet(torch.nn.Module):
+     def __init__(self):
+         super().__init__()
+         self.in_channels = pipeline.unet.config.in_channels
+         self.device = pipeline.unet.device
+
+     def forward(self, latent_model_input, t, encoder_hidden_states):
+         sample = unet_traced(latent_model_input, t, encoder_hidden_states)[0]
+         return UNet2DConditionOutput(sample=sample)
+
+ pipeline.unet = TracedUNet()
+
+ prompt = "An astronaut riding a horse on Mars"
+ with torch.inference_mode():
+     image = pipeline([prompt] * 1, num_inference_steps=50).images[0]
549
+ ```
550
+
551
+ ## 内存高效注意力
552
+
553
+ > [!TIP]
554
+ > 内存高效注意力优化内存使用 *和* [推理速度](./fp16#scaled-dot-product-attention)!
555
+
556
+ Transformers 注意力机制是内存密集型的,尤其对于长序列,因此您可以尝试使用不同且更内存高效的注意力类型。
557
+
558
+ 默认情况下,如果安装了 PyTorch >= 2.0,则使用 [scaled dot-product attention (SDPA)](https://pytorch.org/docs/stable/generated/torch.nn.functional.scaled_dot_product_attention.html)。您无需对代码进行任何额外更改。
559
+
560
+ SDPA 还支持 [FlashAttention](https://github.com/Dao-AILab/flash-attention) 和 [xFormers](https://github.com/facebookresearch/xformers),以及一个原生的 C++ PyTorch 实现。它会根据您的输入自动选择最优的实现。
562
+
563
+ 您可以使用 [`~ModelMixin.enable_xformers_memory_efficient_attention`] 方法显式地使用 xFormers。
564
+
565
+ ```py
566
+ # pip install xformers
567
+ import torch
568
+ from diffusers import StableDiffusionXLPipeline
569
+
570
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
571
+ "stabilityai/stable-diffusion-xl-base-1.0",
572
+ torch_dtype=torch.float16,
573
+ ).to("cuda")
574
+ pipeline.enable_xformers_memory_efficient_attention()
575
+ ```
576
+
577
+ 调用 [`~ModelMixin.disable_xformers_memory_efficient_attention`] 来禁用它。
578
+
579
+ ```py
580
+ pipeline.disable_xformers_memory_efficient_attention()
581
+ ```
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/mps.md ADDED
@@ -0,0 +1,82 @@
1
+ <!--版权所有 2025 The HuggingFace Team。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版本("许可证")授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,根据许可证分发的软件按"原样"分发,无任何明示或暗示的担保或条件。请参阅许可证了解具体的语言管理权限和限制。
8
+ -->
9
+
10
+ # Metal Performance Shaders (MPS)
11
+
12
+ > [!TIP]
13
+ > 带有 <img alt="MPS" src="https://img.shields.io/badge/MPS-000000?style=flat&logo=apple&logoColor=white"> 徽章的管道表示模型可以利用 Apple silicon 设备上的 MPS 后端进行更快的推理。欢迎提交 [Pull Request](https://github.com/huggingface/diffusers/compare) 来为缺少此徽章的管道添加它。
14
+
15
+ 🤗 Diffusers 与 Apple silicon(M1/M2 芯片)兼容,使用 PyTorch 的 [`mps`](https://pytorch.org/docs/stable/notes/mps.html) 设备,该设备利用 Metal 框架来发挥 MacOS 设备上 GPU 的性能。您需要具备:
16
+
17
+ - 配备 Apple silicon(M1/M2)硬件的 macOS 计算机
18
+ - macOS 12.6 或更高版本(推荐 13.0 或更高)
19
+ - arm64 版本的 Python
20
+ - [PyTorch 2.0](https://pytorch.org/get-started/locally/)(推荐)或 1.13(支持 `mps` 的最低版本)
21
+
22
+ `mps` 后端使用 PyTorch 的 `.to()` 接口将 Stable Diffusion 管道移动到您的 M1 或 M2 设备上:
23
+
24
+ ```python
25
+ from diffusers import DiffusionPipeline
26
+
27
+ pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5")
28
+ pipe = pipe.to("mps")
29
+
30
+ # 如果您的计算机内存小于 64 GB,推荐使用
31
+ pipe.enable_attention_slicing()
32
+
33
+ prompt = "a photo of an astronaut riding a horse on mars"
34
+ image = pipe(prompt).images[0]
35
+ image
36
+ ```
37
+
38
+ <Tip warning={true}>
39
+
40
+ PyTorch [mps](https://pytorch.org/docs/stable/notes/mps.html) 后端不支持大小超过 `2**32` 的 NDArray。如果您遇到此问题,请提交 [Issue](https://github.com/huggingface/diffusers/issues/new/choose) 以便我们调查。
41
+
42
+ </Tip>
43
+
44
+ 如果您使用 **PyTorch 1.13**,您需要通过管道进行一次额外的"预热"传递。这是一个临时解决方法,用于解决首次推理传递产生的结果与后续传递略有不同的问题。您只需要执行此传递一次,并且在仅进行一次推理步骤后可以丢弃结果。
45
+
46
+ ```diff
47
+ from diffusers import DiffusionPipeline
48
+
49
+ pipe = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5").to("mps")
50
+ pipe.enable_attention_slicing()
51
+
52
+ prompt = "a photo of an astronaut riding a horse on mars"
53
+ # 如果 PyTorch 版本是 1.13,进行首次"预热"传递
54
+ + _ = pipe(prompt, num_inference_steps=1)
55
+
56
+ # 预热传递后,结果与 CPU 设备上的结果匹配。
57
+ image = pipe(prompt).images[0]
58
+ ```
59
+
60
+ ## 故障排除
61
+
62
+ 本节列出了使用 `mps` 后端时的一些常见问题及其解决方法。
63
+
64
+ ### 注意力切片
65
+
66
+ M1/M2 性能对内存压力非常敏感。当发生这种情况时,系统会自动交换内存,这会显著降低性能。
67
+
68
+ 为了防止这种情况发生,我们建议使用*注意力切片*来减少推理过程中的内存压力并防止交换。这在您的计算机系统内存少于 64GB 或生成非标准分辨率(大于 512×512 像素)的图像时尤其相关。在您的管道上调用 [`~DiffusionPipeline.enable_attention_slicing`] 函数:
69
+
70
+ ```py
71
+ from diffusers import DiffusionPipeline
72
+ import torch
73
+
74
+ pipeline = DiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, variant="fp16", use_safetensors=True).to("mps")
75
+ pipeline.enable_attention_slicing()
76
+ ```
77
+
78
+ 注意力切片将昂贵的注意力操作分多个步骤执行,而不是一次性完成。在没有统一内存的计算机中,它通常能提高约 20% 的性能,但我们观察到在大多数 Apple 芯片计算机中,除非您有 64GB 或更多 RAM,否则性能会*更好*。
79
+
80
+ ### 批量推理
81
+
82
+ 批量生成多个提示可能会导致崩溃或无法可靠工作。如果是这种情况,请尝试迭代而不是批量处理。
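+
+ 例如,下面的小示例(假设沿用上文创建的 `pipe`)逐个处理提示,而不是一次性批量生成:
+
+ ```py
+ prompts = [
+     "a photo of an astronaut riding a horse on mars",
+     "a photo of an astronaut riding a horse on the moon",
+ ]
+
+ images = []
+ for prompt in prompts:
+     # 逐个提示生成,避免在 mps 后端上批量推理可能出现的不稳定
+     images.append(pipe(prompt).images[0])
+ ```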
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/neuron.md ADDED
@@ -0,0 +1,59 @@
1
+ <!--版权所有 2025 The HuggingFace Team。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版(“许可证”)授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,根据许可证分发的软件按“原样”分发,无任何明示或暗示的担保或条件。请参阅许可证了解特定语言管理权限和限制。
8
+ -->
9
+
10
+ # AWS Neuron
11
+
12
+ Diffusers 功能可在 [AWS Inf2 实例](https://aws.amazon.com/ec2/instance-types/inf2/)上使用,这些是由 [Neuron 机器学习加速器](https://aws.amazon.com/machine-learning/inferentia/)驱动的 EC2 实例。这些实例旨在提供更好的计算性能(更高的吞吐量、更低的延迟)和良好的成本效益,使其成为 AWS 用户将扩散模型部署到生产环境的良好选择。
13
+
14
+ [Optimum Neuron](https://huggingface.co/docs/optimum-neuron/en/index) 是 Hugging Face 库与 AWS 加速器之间的接口,包括 AWS [Trainium](https://aws.amazon.com/machine-learning/trainium/) 和 AWS [Inferentia](https://aws.amazon.com/machine-learning/inferentia/)。它支持 Diffusers 中的许多功能,并具有类似的 API,因此如果您已经熟悉 Diffusers,学习起来更容易。一旦您创建了 AWS Inf2 实例,请安装 Optimum Neuron。
15
+
16
+ ```bash
17
+ python -m pip install --upgrade-strategy eager optimum[neuronx]
18
+ ```
19
+
20
+ <Tip>
21
+
22
+ 我们提供预构建的 [Hugging Face Neuron 深度学习 AMI](https://aws.amazon.com/marketplace/pp/prodview-gr3e6yiscria2)(DLAMI)和用于 Amazon SageMaker 的 Optimum Neuron 容器。建议正确设置您的环境。
23
+
24
+ </Tip>
25
+
26
+ 下面的示例演示了如何在 inf2.8xlarge 实例上使用 Stable Diffusion XL 模型生成图像(一旦模型编译完成,您可以切换到更便宜的 inf2.xlarge 实例)。要生成一些图像,请使用 [`~optimum.neuron.NeuronStableDiffusionXLPipeline`] 类,该类类似于 Diffusers 中的 [`StableDiffusionXLPipeline`] 类。
27
+
28
+ 与 Diffusers 不同,您需要将管道中的模型编译为 Neuron 格式,即 `.neuron`。运行以下命令将模型导出为 `.neuron` 格式。
29
+
30
+ ```bash
31
+ optimum-cli export neuron --model stabilityai/stable-diffusion-xl-base-1.0 \
32
+ --batch_size 1 \
33
+ --height 1024 `# 生成图像的高度(像素),例如 768, 1024` \
34
+ --width 1024 `# 生成图像的宽度(像素),例如 768, 1024` \
35
+ --num_images_per_prompt 1 `# 每个提示生成的图像数量,默认为 1` \
36
+ --auto_cast matmul `# 仅转换矩阵乘法操作` \
37
+ --auto_cast_type bf16 `# 将操作从 FP32 转换为 BF16` \
38
+ sd_neuron_xl/
39
+ ```
40
+
41
+ 现在使用预编译的 SDXL 模型生成一些图像。
42
+
43
+ ```python
44
+ >>> from optimum.neuron import Neu
45
+ ronStableDiffusionXLPipeline
46
+
47
+ >>> stable_diffusion_xl = NeuronStableDiffusionXLPipeline.from_pretrained("sd_neuron_xl/")
48
+ >>> prompt = "a pig with wings flying in floating US dollar banknotes in the air, skyscrapers behind, warm color palette, muted colors, detailed, 8k"
49
+ >>> image = stable_diffusion_xl(prompt).images[0]
50
+ ```
51
+
52
+ <img
53
+ src="https://huggingface.co/datasets/Jingya/document_images/resolve/main/optimum/neuron/sdxl_pig.png"
54
+ width="256"
55
+ height="256"
56
+ alt="peggy generated by sdxl on inf2"
57
+ />
58
+
59
+ 欢迎查看Optimum Neuron [文档](https://huggingface.co/docs/optimum-neuron/en/inference_tutorials/stable_diffusion#generate-images-with-stable-diffusion-models-on-aws-inferentia)中更多不同用例的指南和示例!
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/onnx.md ADDED
@@ -0,0 +1,82 @@
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ 根据 Apache License 2.0 许可证(以下简称"许可证")授权,除非符合许可证要求,否则不得使用本文件。您可以通过以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或以书面形式同意,本软件按"原样"分发,不附带任何明示或暗示的担保或条件。详见许可证中规定的特定语言权限和限制。
8
+ -->
9
+
10
+ # ONNX Runtime
11
+
12
+ 🤗 [Optimum](https://github.com/huggingface/optimum) 提供了兼容 ONNX Runtime 的 Stable Diffusion 流水线。您需要运行以下命令安装支持 ONNX Runtime 的 🤗 Optimum:
13
+
14
+ ```bash
15
+ pip install -q optimum["onnxruntime"]
16
+ ```
17
+
18
+ 本指南将展示如何使用 ONNX Runtime 运行 Stable Diffusion 和 Stable Diffusion XL (SDXL) 流水线。
19
+
20
+ ## Stable Diffusion
21
+
22
+ 要加载并运行推理,请使用 [`~optimum.onnxruntime.ORTStableDiffusionPipeline`]。若需加载 PyTorch 模型并实时转换为 ONNX 格式,请设置 `export=True`:
23
+
24
+ ```python
25
+ from optimum.onnxruntime import ORTStableDiffusionPipeline
26
+
27
+ model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
28
+ pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id, export=True)
29
+ prompt = "sailing ship in storm by Leonardo da Vinci"
30
+ image = pipeline(prompt).images[0]
31
+ pipeline.save_pretrained("./onnx-stable-diffusion-v1-5")
32
+ ```
33
+
34
+ <Tip warning={true}>
35
+
36
+ 当前批量生成多个提示可能会占用过高内存。在问题修复前,建议采用迭代方式而非批量处理。
37
+
38
+ </Tip>
39
+
40
+ 如需离线导出 ONNX 格式流水线供后续推理使用,请使用 [`optimum-cli export`](https://huggingface.co/docs/optimum/main/en/exporters/onnx/usage_guides/export_a_model#exporting-a-model-to-onnx-using-the-cli) 命令:
41
+
42
+ ```bash
43
+ optimum-cli export onnx --model stable-diffusion-v1-5/stable-diffusion-v1-5 sd_v15_onnx/
44
+ ```
45
+
46
+ 随后进行推理时(无需再次指定 `export=True`):
47
+
48
+ ```python
49
+ from optimum.onnxruntime import ORTStableDiffusionPipeline
50
+
51
+ model_id = "sd_v15_onnx"
52
+ pipeline = ORTStableDiffusionPipeline.from_pretrained(model_id)
53
+ prompt = "sailing ship in storm by Leonardo da Vinci"
54
+ image = pipeline(prompt).images[0]
55
+ ```
56
+
57
+ <div class="flex justify-center">
58
+ <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/onnxruntime/stable_diffusion_v1_5_ort_sail_boat.png">
59
+ </div>
60
+
61
+ 您可以在 🤗 Optimum [文档](https://huggingface.co/docs/optimum/) 中找到更多示例,Stable Diffusion 支持文生图、图生图和图像修复任务。
62
+
63
+ ## Stable Diffusion XL
64
+
65
+ 要加载并运行 SDXL 推理,请使用 [`~optimum.onnxruntime.ORTStableDiffusionXLPipeline`]:
66
+
67
+ ```python
68
+ from optimum.onnxruntime import ORTStableDiffusionXLPipeline
69
+
70
+ model_id = "stabilityai/stable-diffusion-xl-base-1.0"
71
+ pipeline = ORTStableDiffusionXLPipeline.from_pretrained(model_id)
72
+ prompt = "sailing ship in storm by Leonardo da Vinci"
73
+ image = pipeline(prompt).images[0]
74
+ ```
75
+
76
+ 如需导出 ONNX 格式流水线供后续推理使用,请运行:
77
+
78
+ ```bash
79
+ optimum-cli export onnx --model stabilityai/stable-diffusion-xl-base-1.0 --task stable-diffusion-xl sd_xl_onnx/
80
+ ```
81
+
82
+ SDXL 的 ONNX 格式目前支持文生图和图生图任务。
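+
+ 例如,下面的草图(假设使用 `optimum.onnxruntime` 中的 `ORTStableDiffusionXLImg2ImgPipeline`,并复用上面导出的 `sd_xl_onnx/` 目录;类名以实际版本为准)演示图生图用法:
+
+ ```python
+ from optimum.onnxruntime import ORTStableDiffusionXLImg2ImgPipeline
+ from diffusers.utils import load_image
+
+ pipeline = ORTStableDiffusionXLImg2ImgPipeline.from_pretrained("sd_xl_onnx/")
+ init_image = load_image(
+     "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/img2img-sdxl-init.png"
+ )
+ prompt = "sailing ship in storm by Leonardo da Vinci"
+ image = pipeline(prompt, image=init_image, strength=0.5).images[0]
+ ```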
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/open_vino.md ADDED
@@ -0,0 +1,77 @@
1
+ <!--版权所有 2025 HuggingFace 团队。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版本("许可证")授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,根据许可证分发的软件按"原样"分发,无任何明示或暗示的担保或条件。请参阅许可证以了解具体的语言管理权限和限制。
8
+ -->
9
+
10
+ # OpenVINO
11
+
12
+ 🤗 [Optimum](https://github.com/huggingface/optimum-intel) 提供与 OpenVINO 兼容的 Stable Diffusion 管道,可在各种 Intel 处理器上执行推理(请参阅支持的设备[完整列表](https://docs.openvino.ai/latest/openvino_docs_OV_UG_supported_plugins_Supported_Devices.html))。
13
+
14
+ 您需要安装 🤗 Optimum Intel,并使用 `--upgrade-strategy eager` 选项以确保 [`optimum-intel`](https://github.com/huggingface/optimum-intel) 使用最新版本:
15
+
16
+ ```bash
17
+ pip install --upgrade-strategy eager optimum["openvino"]
18
+ ```
19
+
20
+ 本指南将展示如何使用 Stable Diffusion 和 Stable Diffusion XL (SDXL) 管道与 OpenVINO。
21
+
22
+ ## Stable Diffusion
23
+
24
+ 要加载并运行推理,请使用 [`~optimum.intel.OVStableDiffusionPipeline`]。如果您想加载 PyTorch 模型并即时转换为 OpenVINO 格式,请设置 `export=True`:
25
+
26
+ ```python
27
+ from optimum.intel import OVStableDiffusionPipeline
28
+
29
+ model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
30
+ pipeline = OVStableDiffusionPipeline.from_pretrained(model_id, export=True)
31
+ prompt = "sailing ship in storm by Rembrandt"
32
+ image = pipeline(prompt).images[0]
33
+
34
+ # 别忘了保存导出的模型
35
+ pipeline.save_pretrained("openvino-sd-v1-5")
36
+ ```
37
+
38
+ 为了进一步加速推理,静态重塑模型。如果您更改任何参数,例如输出高度或宽度,您需要再次静态重塑模型。
39
+
40
+ ```python
41
+ # 定义与输入和期望输出相关的形状
42
+ batch_size, num_images, height, width = 1, 1, 512, 512
43
+
44
+ # 静态重塑模型
45
+ pipeline.reshape(batch_size, height, width, num_images)
46
+ # 在推理前编译模型
47
+ pipeline.compile()
48
+
49
+ image = pipeline(
50
+ prompt,
51
+ height=height,
52
+ width=width,
53
+ num_images_per_prompt=num_images,
54
+ ).images[0]
55
+ ```
56
+ <div class="flex justify-center">
57
+ <img src="https://huggingface.co/datasets/optimum/documentation-images/resolve/main/intel/openvino/stable_diffusion_v1_5_sail_boat_rembrandt.png">
58
+ </div>
59
+
60
+ 您可以在 🤗 Optimum [文档](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion) 中找到更多示例,Stable Diffusion 支持文本到图像、图像到图像和修复。
61
+
62
+ ## Stable Diffusion XL
63
+
64
+ 要加载并运行 SDXL 推理,请使用 [`~optimum.intel.OVStableDiffusionXLPipeline`]:
65
+
66
+ ```python
67
+ from optimum.intel import OVStableDiffusionXLPipeline
68
+
69
+ model_id = "stabilityai/stable-diffusion-xl-base-1.0"
70
+ pipeline = OVStableDiffusionXLPipeline.from_pretrained(model_id)
71
+ prompt = "sailing ship in storm by Rembrandt"
72
+ image = pipeline(prompt).images[0]
73
+ ```
74
+
75
+ 为了进一步加速推理,可以如Stable Diffusion部分所示[静态重塑](#stable-diffusion)模型。
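+
+ 下面是一个示意性草图(假设沿用上文加载的 SDXL `pipeline`,`reshape`/`compile` 的用法与 Stable Diffusion 部分相同):
+
+ ```python
+ # 静态重塑并编译 SDXL 管道(示意,参数含义与 Stable Diffusion 部分相同)
+ batch_size, num_images, height, width = 1, 1, 1024, 1024
+
+ pipeline.reshape(batch_size, height, width, num_images)
+ pipeline.compile()
+
+ image = pipeline(
+     prompt,
+     height=height,
+     width=width,
+     num_images_per_prompt=num_images,
+ ).images[0]
+ ```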
76
+
77
+ 您可以在🤗 Optimum[文档](https://huggingface.co/docs/optimum/intel/inference#stable-diffusion-xl)中找到更多示例,并且在OpenVINO中运行SDXL支持文本到图像和图像到图像。
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/para_attn.md ADDED
@@ -0,0 +1,497 @@
1
+ # ParaAttention
2
+
3
+ <div class="flex justify-center">
4
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-performance.png">
5
+ </div>
6
+ <div class="flex justify-center">
7
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/hunyuan-video-performance.png">
8
+ </div>
9
+
10
+ 大型图像和视频生成模型,如 [FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev) 和 [HunyuanVideo](https://huggingface.co/tencent/HunyuanVideo),由于其规模,可能对实时应用和部署构成推理挑战。
11
+
12
+ [ParaAttention](https://github.com/chengzeyi/ParaAttention) 是一个实现了**上下文并行**和**第一块缓存**的库,可以与其他技术(如 torch.compile、fp8 动态量化)结合使用,以加速推理。
13
+
14
+ 本指南将展示如何在 NVIDIA L20 GPU 上对 FLUX.1-dev 和 HunyuanVideo 应用 ParaAttention。
15
+ 在我们的基线基准测试中,除了 HunyuanVideo 为避免内存不足错误外,未应用任何优化。
16
+
17
+ 我们的基线基准测试显示,FLUX.1-dev 能够在 28 步中生成 1024x1024 分辨率图像,耗时 26.36 秒;HunyuanVideo 能够在 30 步中生成 129 帧 720p 分辨率视频,耗时 3675.71 秒。
18
+
19
+ > [!TIP]
20
+ > 对于更快的上下文并行推理,请尝试使用支持 NVLink 的 NVIDIA A100 或 H100 GPU(如果可用),尤其是在 GPU 数量较多时。
21
+
22
+ ## 第一块缓存
23
+
24
+ 缓存模型中 transformer 块的输出并在后续推理步骤中重用它们,可以降低计算成本并加速推理。
25
+
26
+ 然而,很难决定何时重用缓存以确保生成图像或视频的质量。ParaAttention 直接使用**第一个 transformer 块输出的残差差异**来近似模型输出之间的差异。当差异足够小时,重用先前推理步骤的残差差异。换句话说,跳过去噪步骤。
27
+
28
+ 这在 FLUX.1-dev 和 HunyuanVideo 推理上实现了 2 倍加速,且质量非常好。
29
+
30
+ <figure>
31
+ <img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/ada-cache.png" alt="Cache in Diffusion Transformer" />
32
+ <figcaption>AdaCache 的工作原理,第一块缓存是其变体</figcaption>
33
+ </figure>
34
+
35
+ <hfoptions id="first-block-cache">
36
+ <hfoption id="FLUX-1.dev">
37
+
38
+ 要在 FLUX.1-dev 上应用第一块缓存,请调用 `apply_cache_on_pipe`,如下所示。0.08 是 FLUX 模型的默认残差差异值。
39
+
40
+ ```python
41
+ import time
42
+ import torch
43
+ from diffusers import FluxPipeline
44
+
45
+ pipe = FluxPipeline.from_pretrained(
46
+ "black-forest-labs/FLUX.1-dev",
47
+ torch_dtype=torch.bfloat16,
48
+ ).to("cuda")
49
+
50
+ from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
51
+
52
+ apply_cache_on_pipe(pipe, residual_diff_threshold=0.08)
54
+
55
+ # 启用内存节省
56
+ # pipe.enable_model_cpu_offload()
57
+ # pipe.enable_sequential_cpu_offload()
58
+
59
+ begin = time.time()
60
+ image = pipe(
61
+ "A cat holding a sign that says hello world",
62
+ num_inference_steps=28,
63
+ ).images[0]
64
+ end = time.time()
65
+ print(f"Time: {end - begin:.2f}s")
66
+
67
+ print("Saving image to flux.png")
68
+ image.save("flux.png")
69
+ ```
70
+
71
+ | 优化 | 原始 | FBCache rdt=0.06 | FBCache rdt=0.08 | FBCache rdt=0.10 | FBCache rdt=0.12 |
72
+ | - | - | - | - | - | - |
73
+ | 预览 | ![Original](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-original.png) | ![FBCache rdt=0.06](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.06.png) | ![FBCache rdt=0.08](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.08.png) | ![FBCache rdt=0.10](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.10.png) | ![FBCache rdt=0.12](https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/flux-fbc-0.12.png) |
74
+ | 墙时间 (s) | 26.36 | 21.83 | 17.01 | 16.00 | 13.78 |
75
+
76
+ First Block Cache 将推理时间缩短至 17.01 秒,相比基线快了 1.55 倍,同时保持几乎为零的质量损失。
77
+
78
+ </hfoption>
79
+ <hfoption id="HunyuanVideo">
80
+
81
+ 要在 HunyuanVideo 上应用 First Block Cache,请使用 `apply_cache_on_pipe`,如下所示。0.06 是 HunyuanVideo 模型的默认残差差值。
82
+
83
+ ```python
84
+ import time
85
+ import torch
86
+ from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
87
+ from diffusers.utils import export_to_video
88
+
89
+ model_id = "tencent/HunyuanVideo"
90
+ transformer = HunyuanVideoTransformer3DModel.from_pretrained(
91
+ model_id,
92
+ subfolder="transformer",
93
+ torch_dtype=torch.bfloat16,
94
+ revision="refs/pr/18",
95
+ )
96
+ pipe = HunyuanVideoPipeline.from_pretrained(
97
+ model_id,
98
+ transformer=transformer,
99
+ torch_dtype=torch.float16,
100
+ revision="refs/pr/18",
101
+ ).to("cuda")
102
+
103
+ from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
104
+
105
+ apply_cache_on_pipe(pipe, residual_diff_threshold=0.6)
106
+
107
+ pipe.vae.enable_tiling()
108
+
109
+ begin = time.time()
110
+ output = pipe(
111
+ prompt="A cat walks on the grass, realistic",
112
+ height=720,
113
+ width=1280,
114
+ num_frames=129,
115
+ num_inference_steps=30,
116
+ ).frames[0]
117
+ end = time.time()
118
+ print(f"Time: {end - begin:.2f}s")
119
+
120
+ print("Saving video to hunyuan_video.mp4")
121
+ export_to_video(output, "hunyuan_video.mp4", fps=15)
122
+ ```
123
+
124
+ <video controls>
125
+ <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/hunyuan-video-original.mp4" type="video/mp4">
126
+ 您的浏览器不支持视频标签。
127
+ </video>
128
+
129
+ <small> HunyuanVideo 无 FBCache </small>
130
+
131
+ <video controls>
132
+ <source src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/para-attn/hunyuan-video-fbc.mp4" type="video/mp4">
133
+ Your browser does not support the video tag.
134
+ </video>
135
+
136
+ <small> HunyuanVideo 与 FBCache </small>
137
+
138
+ First Block Cache 将推理时间缩短至 2271.06 秒,相比基线快了 1.62 倍,同时保持了几乎为零的质量损失。
139
+
140
+ </hfoption>
141
+ </hfoptions>
142
+
143
+ ## fp8 量化
144
+
145
+ fp8 动态量化进一步加速推理并减少内存使用。为了使用 8 位 [NVIDIA Tensor Cores](https://www.nvidia.com/en-us/data-center/tensor-cores/),必须对激活和权重进行量化。
146
+
147
+ 使用 `float8_weight_only` 和 `float8_dynamic_activation_float8_weight` 来量化文本编码器和变换器模型。
148
+
149
+ 默认量化方法是逐张量量化,但如果您的 GPU 支持逐行量化,您也可以尝试它以获得更好的准确性。
150
+
151
+ 使用以下命令安装 [torchao](https://github.com/pytorch/ao/tree/main)。
152
+
153
+ ```bash
154
+ pip3 install -U torch torchao
155
+ ```
156
+
157
+ [torch.compile](https://pytorch.org/tutorials/intermediate/torch_compile_tutorial.html) 使用 `mode="max-autotune-no-cudagraphs"` 或 `mode="max-autotune"` 选择最佳内核以获得性能。如果是第一次调用模型,编译可能会花费很长时间,但一旦模型编译完成,这是值得的。
158
+
159
+ 此示例仅量化变换器模型,但您也可以量化文本编码器以进一步减少内存使用。
160
+
161
+ > [!TIP]
162
+ > 动态量化可能会显著改变模型输出的分布,因此您需要将 `residual_diff_threshold` 设置为更大的值以使其生效。
163
+
164
+ <hfoptions id="fp8-quantization">
165
+ <hfoption id="FLUX-1.dev">
166
+
167
+ ```python
168
+ import time
169
+ import torch
170
+ from diffusers import FluxPipeline
171
+
172
+ pipe = FluxPipeline.from_pretrained(
173
+ "black-forest-labs/FLUX.1-dev",
174
+ torch_dtype=torch.bfloat16,
175
+ ).to("cuda")
176
+
177
+ from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
178
+
179
+ apply_cache_on_pipe(
180
+ pipe,
181
+ residual_diff_threshold=0.12, # 使用更大的值以使缓存生效
182
+ )
183
+
184
+ from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only
185
+
186
+ quantize_(pipe.text_encoder, float8_weight_only())
187
+ quantize_(pipe.transformer, float8_dynamic_activation_float8_weight())
188
+ pipe.transformer = torch.compile(
189
+ pipe.transformer, mode="max-autotune-no-cudagraphs",
190
+ )
191
+
192
+ # 启用内存节省
193
+ # pipe.enable_model_cpu_offload()
194
+ # pipe.enable_sequential_cpu_offload()
195
+
196
+ for i in range(2):
197
+ begin = time.time()
198
+ image = pipe(
199
+ "A cat holding a sign that says hello world",
200
+ num_inference_steps=28,
201
+ ).images[0]
202
+ end = time.time()
203
+ if i == 0:
204
+ print(f"预热时间: {end - begin:.2f}s")
205
+ else:
206
+ print(f"时间: {end - begin:.2f}s")
207
+
208
+ print("保存图像到 flux.png")
209
+ image.save("flux.png")
210
+ ```
211
+
212
+ fp8 动态量化和 torch.compile 将推理时间缩短至 7.56 秒,相比基线快了 3.48 倍。
213
+ </hfoption>
214
+ <hfoption id="HunyuanVideo">
215
+
216
+ ```python
217
+ import time
218
+ import torch
219
+ from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
220
+ from diffusers.utils import export_to_video
221
+
222
+ model_id = "tencent/HunyuanVideo"
223
+ transformer = HunyuanVideoTransformer3DModel.from_pretrained(
224
+ model_id,
225
+ subfolder="transformer",
226
+ torch_dtype=torch.bfloat16,
227
+ revision="refs/pr/18",
228
+ )
229
+ pipe = HunyuanVideoPipeline.from_pretrained(
230
+ model_id,
231
+ transformer=transformer,
232
+ torch_dtype=torch.float16,
233
+ revision="refs/pr/18",
234
+ ).to("cuda")
235
+
236
+ from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
237
+
238
+ apply_cache_on_pipe(pipe)
239
+
240
+ from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only
241
+
242
+ quantize_(pipe.text_encoder, float8_weight_only())
243
+ quantize_(pipe.transformer, float8_dynamic_activation_float8_weight())
244
+ pipe.transformer = torch.compile(
245
+ pipe.transformer, mode="max-autotune-no-cudagraphs",
246
+ )
247
+
248
+ # Enable memory savings
249
+ pipe.vae.enable_tiling()
250
+ # pipe.enable_model_cpu_offload()
251
+ # pipe.enable_sequential_cpu_offload()
252
+
253
+ for i in range(2):
254
+ begin = time.time()
255
+ output = pipe(
256
+ prompt="A cat walks on the grass, realistic",
257
+ height=720,
258
+ width=1280,
259
+ num_frames=129,
260
+ num_inference_steps=1 if i == 0 else 30,
261
+ ).frames[0]
262
+ end = time.time()
263
+ if i == 0:
264
+ print(f"Warm up time: {end - begin:.2f}s")
265
+ else:
266
+ print(f"Time: {end - begin:.2f}s")
267
+
268
+ print("Saving video to hunyuan_video.mp4")
269
+ export_to_video(output, "hunyuan_video.mp4", fps=15)
270
+ ```
271
+
272
+ NVIDIA L20 GPU 仅有 48GB 内存,在编译后且如果未调用 `enable_model_cpu_offload` 时,可能会遇到内存不足(OOM)错误,因为 HunyuanVideo 在高分辨率和大量帧数运行时具有非常大的激活张量。对于内存少于 80GB 的 GPU,可以尝试降低分辨率和帧数来避免 OOM 错误。
273
+
274
+ 大型视频生成模型通常受注意力计算而非全连接层的瓶颈限制。这些模型不会从量化和 torch.compile 中显著受益。
275
+
276
+ </hfoption>
277
+ </hfoptions>
278
+
279
+ ## 上下文并行性
280
+
281
+ 上下文并行将推理并行化,并可随 GPU 数量扩展。ParaAttention 的组合式设计允许您将上下文并行与第一块缓存和动态量化结合使用。
282
+
283
+ > [!TIP]
284
+ > 请参考 [ParaAttention](https://github.com/chengzeyi/ParaAttention/tree/main) 仓库获取详细说明和如何使用多个 GPU 扩展推理的示例。
285
+
286
+ 如果推理过程需要常驻并对外提供服务,建议使用 [torch.multiprocessing](https://pytorch.org/docs/stable/multiprocessing.html) 编写您自己的推理处理器。这可以消除启动进程以及加载和重新编译模型的开销,如下面的草图所示。
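+
+ 下面是一个极简草图(并非 ParaAttention 官方示例,仅演示思路):用 `torch.multiprocessing` 启动一个常驻工作进程,在其中加载一次管道,通过队列接收提示并复用已加载的模型:
+
+ ```python
+ import torch.multiprocessing as mp
+
+ def worker(task_queue, result_queue):
+     # 子进程中只加载一次模型,之后反复复用,避免重复加载和重新编译
+     import torch
+     from diffusers import FluxPipeline
+
+     pipe = FluxPipeline.from_pretrained(
+         "black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16
+     ).to("cuda")
+     while True:
+         prompt = task_queue.get()
+         if prompt is None:  # 哨兵值:收到 None 即退出
+             break
+         image = pipe(prompt, num_inference_steps=28).images[0]
+         result_queue.put(image)
+
+ if __name__ == "__main__":
+     mp.set_start_method("spawn", force=True)
+     task_queue, result_queue = mp.Queue(), mp.Queue()
+     p = mp.Process(target=worker, args=(task_queue, result_queue))
+     p.start()
+
+     task_queue.put("A cat holding a sign that says hello world")
+     image = result_queue.get()
+     image.save("flux.png")
+
+     task_queue.put(None)
+     p.join()
+ ```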
287
+
288
+ <hfoptions id="context-parallelism">
289
+ <hfoption id="FLUX-1.dev">
290
+
291
+ 以下代码示例结合了第一块缓存、fp8动态量化、torch.compile和上下文并行,以实现最快的推理速度。
292
+
293
+ ```python
294
+ import time
295
+ import torch
296
+ import torch.distributed as dist
297
+ from diffusers import FluxPipeline
298
+
299
+ dist.init_process_group()
300
+
301
+ torch.cuda.set_device(dist.get_rank())
302
+
303
+ pipe = FluxPipeline.from_pretrained(
304
+ "black-forest-labs/FLUX.1-dev",
305
+ torch_dtype=torch.bfloat16,
306
+ ).to("cuda")
307
+
308
+ from para_attn.context_parallel import init_context_parallel_mesh
309
+ from para_attn.context_parallel.diffusers_adapters import parallelize_pipe
310
+ from para_attn.parallel_vae.diffusers_adapters import parallelize_vae
311
+
312
+ mesh = init_context_parallel_mesh(
313
+ pipe.device.type,
314
+ max_ring_dim_size=2,
315
+ )
316
+ parallelize_pipe(
317
+ pipe,
318
+ mesh=mesh,
319
+ )
320
+ parallelize_vae(pipe.vae, mesh=mesh._flatten())
321
+
322
+ from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
323
+
324
+ apply_cache_on_pipe(
325
+ pipe,
326
+ residual_diff_threshold=0.12, # 使用较大的值以使缓存生效
327
+ )
328
+
329
+ from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only
330
+
331
+ quantize_(pipe.text_encoder, float8_weight_only())
332
+ quantize_(pipe.transformer, float8_dynamic_activation_float8_weight())
333
+ torch._inductor.config.reorder_for_compute_comm_overlap = True
334
+ pipe.transformer = torch.compile(
335
+ pipe.transformer, mode="max-autotune-no-cudagraphs",
336
+ )
337
+
338
+ # 启用内存节省
339
+ # pipe.enable_model_cpu_offload(gpu_id=dist.get_rank())
340
+ # pipe.enable_sequential_cpu_offload(gpu_id=dist.get_rank())
341
+
342
+ for i in range(2):
343
+ begin = time.time()
344
+ image = pipe(
345
+ "A cat holding a sign that says hello world",
346
+ num_inference_steps=28,
347
+ output_type="pil" if dist.get_rank() == 0 else "pt",
348
+ ).images[0]
349
+ end = time.time()
350
+ if dist.get_rank() == 0:
351
+ if i == 0:
352
+ print(f"预热时间: {end - begin:.2f}s")
353
+ else:
354
+ print(f"时间: {end - begin:.2f}s")
355
+
356
+ if dist.get_rank() == 0:
357
+ print("将图像保存到flux.png")
358
+ image.save("flux.png")
359
+
360
+ dist.destroy_process_group()
361
+ ```
362
+
363
+ 保存到`run_flux.py`并使用[torchrun](https://pytorch.org/docs/stable/elastic/run.html)启动。
364
+
365
+ ```bash
366
+ # 使用--nproc_per_node指定GPU数量
367
+ torchrun --nproc_per_node=2 run_flux.py
368
+ ```
369
+
370
+ 使用 2 个 NVIDIA L20 GPU 时,推理速度降至 8.20 秒,相比基线快 3.21 倍;使用 4 个 L20 时为 3.90 秒,快 6.75 倍。
371
+
372
+ </hfoption>
373
+ <hfoption id="HunyuanVideo">
374
+
375
+ 以下代码示例结合了第一块缓存和上下文并行,以实现最快的推理速度。
376
+
377
+ ```python
378
+ import time
379
+ import torch
380
+ import torch.distributed as dist
381
+ from diffusers import HunyuanVideoPipeline, HunyuanVideoTransformer3DModel
382
+ from diffusers.utils import export_to_video
383
+
384
+ dist.init_process_group()
385
+
386
+ torch.cuda.set_device(dist.get_rank())
387
+
388
+ model_id = "tencent/HunyuanVideo"
389
+ transformer = HunyuanVideoTransformer3DModel.from_pretrained(
390
+ model_id,
391
+ subfolder="transformer",
392
+ torch_dtype=torch.bfloat16,
393
+ revision="refs/pr/18",
394
+ )
395
+ pipe = HunyuanVideoPipeline.from_pretrained(
396
+ model_id,
397
+ transformer=transformer,
398
+ torch_dtype=torch.float16,
399
+ revision="refs/pr/18",
400
+ ).to("cuda")
401
+
402
+ from para_attn.context_parallel import init_context_parallel_mesh
403
+ from para_attn.context_parallel.diffusers_adapters import parallelize_pipe
404
+ from para_attn.parallel_vae.diffusers_adapters import parallelize_vae
405
+
406
+ mesh = init_context_parallel_mesh(
407
+ pipe.device.type,
408
+ )
409
+ parallelize_pipe(
410
+ pipe,
411
+ mesh=mesh,
412
+ )
413
+ parallelize_vae(pipe.vae, mesh=mesh._flatten())
414
+
415
+ from para_attn.first_block_cache.diffusers_adapters import apply_cache_on_pipe
416
+
417
+ apply_cache_on_pipe(pipe)
418
+
419
+ # from torchao.quantization import quantize_, float8_dynamic_activation_float8_weight, float8_weight_only
420
+ #
421
+ # torch._inductor.config.reorder_for_compute_comm_overlap = True
422
+ #
423
+ # quantize_(pipe.text_encoder, float8_weight_only())
424
+ # quantize_(pipe.transformer, float8_dynamic_activation_float8_weight())
425
+ # pipe.transformer = torch.compile(
426
+ # pipe.transformer, mode="max-autotune-no-cudagraphs",
427
+ # )
428
+
429
+ # 启用内存节省
430
+ pipe.vae.enable_tiling()
431
+ # pipe.enable_model_cpu_offload(gpu_id=dist.get_rank())
432
+ # pipe.enable_sequential_cpu_offload(gpu_id=dist.get_rank())
433
+
434
+ for i in range(2):
435
+ begin = time.time()
436
+ output = pipe(
437
+ prompt="A cat walks on the grass, realistic",
438
+ height=720,
439
+ width=1280,
440
+ num_frames=129,
441
+ num_inference_steps=1 if i == 0 else 30,
442
+ output_type="pil" if dist.get_rank() == 0 else "pt",
443
+ ).frames[0]
444
+ end = time.time()
445
+ if dist.get_rank() == 0:
446
+ if i == 0:
447
+ print(f"预热时间: {end - begin:.2f}s")
448
+ else:
449
+ print(f"时间: {end - begin:.2f}s")
450
+
451
+ if dist.get_rank() == 0:
452
+ print("保存视频到 hunyuan_video.mp4")
453
+ export_to_video(output, "hunyuan_video.mp4", fps=15)
454
+
455
+ dist.destroy_process_group()
456
+ ```
457
+
458
+ 保存到 `run_hunyuan_video.py` 并使用 [torchrun](https://pytorch.org/docs/stable/elastic/run.html) 启动。
459
+
460
+ ```bash
461
+ # 使用 --nproc_per_node 指定 GPU 数量
462
+ torchrun --nproc_per_node=8 run_hunyuan_video.py
463
+ ```
464
+
465
+ 使用 8 个 NVIDIA L20 GPU 时,推理速度降低到 649.23 秒,相比基线快 5.66 倍。
466
+
467
+ </hfoption>
468
+ </hfoptions>
469
+
470
+ ## 基准测试
471
+
472
+ <hfoptions id="conclusion">
473
+ <hfoption id="FLUX-1.dev">
474
+
475
+ | GPU 类型 | GPU 数量 | 优化 | 墙钟时间 (s) | 加速比 |
476
+ | - | - | - | - | - |
477
+ | NVIDIA L20 | 1 | 基线 | 26.36 | 1.00x |
478
+ | NVIDIA L20 | 1 | FBCache (rdt=0.08) | 17.01 | 1.55x |
479
+ | NVIDIA L20 | 1 | FP8 DQ | 13.40 | 1.96x |
480
+ | NVIDIA L20 | 1 | FBCache (rdt=0.12) + FP8 DQ | 7.56 | 3.48x |
481
+ | NVIDIA L20 | 2 | FBCache (rdt=0.12) + FP8 DQ + CP | 4.92 | 5.35x |
482
+ | NVIDIA L20 | 4 | FBCache (rdt=0.12) + FP8 DQ + CP | 3.90 | 6.75x |
483
+
484
+ </hfoption>
485
+ <hfoption id="HunyuanVideo">
486
+
487
+ | GPU 类型 | GPU 数量 | 优化 | 墙钟时间 (s) | 加速比 |
488
+ | - | - | - | - | - |
489
+ | NVIDIA L20 | 1 | 基线 | 3675.71 | 1.00x |
490
+ | NVIDIA L20 | 1 | FBCache | 2271.06 | 1.62x |
491
492
+ | NVIDIA L20 | 2 | FBCache + CP | 1132.90 | 3.24x |
493
+ | NVIDIA L20 | 4 | FBCache + CP | 718.15 | 5.12x |
494
+ | NVIDIA L20 | 8 | FBCache + CP | 649.23 | 5.66x |
495
+
496
+ </hfoption>
497
+ </hfoptions>
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/pruna.md ADDED
@@ -0,0 +1,184 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # Pruna
2
+
3
+ [Pruna](https://github.com/PrunaAI/pruna) 是一个模型优化框架,提供多种优化方法——量化、剪枝、缓存、编译——以加速推理并减少内存使用。以下是优化方法的概览。
4
+
5
+ | 技术 | 描述 | 速度 | 内存 | 质量 |
6
+ |------------|---------------------------------------------------------------------------------------|:----:|:----:|:----:|
7
+ | `batcher` | 将多个输入分组在一起同时处理,提高计算效率并减少处理时间。 | ✅ | ❌ | ➖ |
8
+ | `cacher` | 存储计算的中间结果以加速后续操作。 | ✅ | ➖ | ➖ |
9
+ | `compiler` | 为特定硬件优化模型指令。 | ✅ | ➖ | ➖ |
10
+ | `distiller`| 训练一个更小、更简单的模型来模仿一个更大、更复杂的模型。 | ✅ | ✅ | ❌ |
11
+ | `quantizer`| 降低权重和激活的精度,减少内存需求。 | ✅ | ✅ | ❌ |
12
+ | `pruner` | 移除不重要或冗余的连接和神经元,产生一个更稀疏、更高效的网络。 | ✅ | ✅ | ❌ |
13
+ | `recoverer`| 在压缩后恢复模型的性能。 | ➖ | ➖ | ✅ |
14
+ | `factorizer`| 将多个小矩阵乘法批处理为一个大型融合操作。 | ✅ | ➖ | ➖ |
15
+ | `enhancer` | 通过应用后处理算法(如去噪或上采样)来增强模型输出。 | ❌ | - | ✅ |
16
+
17
+ ✅ (改进), ➖ (大致相同), ❌ (恶化)
18
+
19
+ 在 [Pruna 文档](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html#configure-algorithms) 中探索所有优化方法。
20
+
21
+ ## 安装
22
+
23
+ 使用以下命令安装 Pruna。
24
+
25
+ ```bash
26
+ pip install pruna
27
+ ```
28
+
29
+ ## 优化 Diffusers 模型
30
+
31
+ Diffusers 模型支持广泛的优化算法,如下所示。
32
+
33
+ <div class="flex justify-center">
34
+ <img src="https://huggingface.co/datasets/PrunaAI/documentation-images/resolve/main/diffusers/diffusers_combinations.png" alt="Diffusers 模型支持的优化算法概览">
35
+ </div>
36
+
37
+ 下面的示例使用 factorizer、compiler 和 cacher 算法的组合优化 [black-forest-labs/FLUX.1-dev](https://huggingface.co/black-forest-labs/FLUX.1-dev)。这种组合将推理速度加速高达 4.2 倍,并将峰值 GPU 内存使用从 34.7GB 减少到 28.0GB,同时几乎保持相同的输出质量。
38
+
39
+ > [!TIP]
40
+ > 参考 [Pruna 优化](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html) 文档,以了解更多关于本示例中所用优化技术的信息。
41
42
+
43
+ <div class="flex justify-center">
44
+ <img src="https://huggingface.co/datasets/PrunaAI/documentation-images/resolve/main/diffusers/flux_combination.png" alt="用于FLUX.1-dev的优化技术展示,结合了因子分解器、编译器和缓存器算法">
45
+ </div>
46
+
47
+ 首先定义一个包含要使用的优化算法的`SmashConfig`。要优化模型,将管道和`SmashConfig`用`smash`包装,然后像往常一样使用管道进行推理。
48
+
49
+ ```python
50
+ import torch
51
+ from diffusers import FluxPipeline
52
+
53
+ from pruna import PrunaModel, SmashConfig, smash
54
+
55
+ # 加载模型
56
+ # 使用小GPU内存尝试segmind/Segmind-Vega或black-forest-labs/FLUX.1-schnell
57
+ pipe = FluxPipeline.from_pretrained(
58
+ "black-forest-labs/FLUX.1-dev",
59
+ torch_dtype=torch.bfloat16
60
+ ).to("cuda")
61
+
62
+ # 定义配置
63
+ smash_config = SmashConfig()
64
+ smash_config["factorizer"] = "qkv_diffusers"
65
+ smash_config["compiler"] = "torch_compile"
66
+ smash_config["torch_compile_target"] = "module_list"
67
+ smash_config["cacher"] = "fora"
68
+ smash_config["fora_interval"] = 2
69
+
70
+ # 为了获得最佳速度结果,可以添加这些配置
71
+ # 但它们会将预热时间从1.5分钟增加到10分钟
72
+ # smash_config["torch_compile_mode"] = "max-autotune-no-cudagraphs"
73
+ # smash_config["quantizer"] = "torchao"
74
+ # smash_config["torchao_quant_type"] = "fp8dq"
75
+ # smash_config["torchao_excluded_modules"] = "norm+embedding"
76
+
77
+ # 优化模型
78
+ smashed_pipe = smash(pipe, smash_config)
79
+
80
+ # 运行模型
81
+ smashed_pipe("a knitted purple prune").images[0]
82
+ ```
83
+
84
+ <div class="flex justify-center">
85
+ <img src="https://huggingface.co/datasets/PrunaAI/documentation-images/resolve/main/diffusers/flux_smashed_comparison.png">
86
+ </div>
87
+
88
+ 优化后,我们可以使用Hugging Face Hub共享和加载优化后的模型。
89
+
90
+ ```python
91
+ # 保存模型
92
+ smashed_pipe.save_to_hub("<username>/FLUX.1-dev-smashed")
93
+
94
+ # 加载模型
95
+ smashed_pipe = PrunaModel.from_hub("<username>/FLUX.1-dev-smashed")
96
+ ```
97
+
98
+ ## 评估和基准测试Diffusers模型
99
+
100
+ Pruna提供了[EvaluationAgent](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/evaluate.html)来评估优化后模型的质量。
101
+
102
+ 我们可以定义我们关心的指标,如总时间和吞吐量,以及要评估的数据集。我们可以定义一个模型并将其传递给`EvaluationAgent`。
103
+
104
+ <hfoptions id="eval">
105
+ <hfoption id="optimized model">
106
+
107
+ 我们可以通过使用`EvaluationAgent`加载和评估优化后的模型,并将其传递给`Task`。
108
+
109
+ ```python
110
+ import torch
111
+ from diffusers import FluxPipeline
112
+
113
+ from pruna import PrunaModel
114
+ from pruna.data.pruna_datamodule import PrunaDataModule
115
+ from pruna.evaluation.evaluation_agent import EvaluationAgent
116
+ from pruna.evaluation.metrics import (
117
+ ThroughputMetric,
118
+ TorchMetricWrapper,
119
+ TotalTimeMetric,
120
+ )
121
+ from pruna.evaluation.task import Task
122
+
123
+ # define the device
124
+ device = "cuda" if torch.cuda.is_available() else "mps" if torch.backends.mps.is_available() else "cpu"
125
+
126
+ # 加载模型
127
+ # 使用小GPU内存尝试 PrunaAI/Segmind-Vega-smashed 或 PrunaAI/FLUX.1-dev-smashed
128
+ smashed_pipe = PrunaModel.from_hub("PrunaAI/FLUX.1-dev-smashed")
129
+
130
+ # 定义指标
131
+ metrics = [
132
+ TotalTimeMetric(n_iterations=20, n_warmup_iterations=5),
133
+ ThroughputMetric(n_iterations=20, n_warmup_iterations=5),
134
+ TorchMetricWrapper("clip"),
135
+ ]
136
+
137
+ # 定义数据模块
138
+ datamodule = PrunaDataModule.from_string("LAION256")
139
+ datamodule.limit_datasets(10)
140
+
141
+ # 定义任务和评估代理
142
+ task = Task(metrics, datamodule=datamodule, device=device)
143
+ eval_agent = EvaluationAgent(task)
144
+
145
+ # 评估优化模型并卸载到CPU
146
+ smashed_pipe.move_to_device(device)
147
+ smashed_pipe_results = eval_agent.evaluate(smashed_pipe)
148
+ smashed_pipe.move_to_device("cpu")
149
+ ```
150
+
151
+ </hfoption>
152
+ <hfoption id="standalone model">
153
+
154
+ 除了比较优化模型与基础模型,您还可以评估独立的 `diffusers` 模型。这在您想评估模型性能而不考虑优化时非常有用。我们可以通过使用 `PrunaModel` 包装器并运行 `EvaluationAgent` 来实现。
155
+
156
+ ```python
157
+ import torch
158
+ from diffusers import FluxPipeline
159
+
160
+ from pruna import PrunaModel
161
+
162
+ # 加载模型
163
+ # 使用小GPU内存尝试 PrunaAI/Segmind-Vega-smashed 或 PrunaAI/FLUX.1-dev-smashed
164
+ pipe = FluxPipeline.from_pretrained(
165
+ "black-forest-labs/FLUX.1-dev",
166
+ torch_dtype=torch.bfloat16
167
+ ).to("cpu")
168
+ wrapped_pipe = PrunaModel(model=pipe)
169
+ ```
170
+
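+ 包装完成后,`wrapped_pipe` 可以像上一个选项卡中的优化模型一样交给 `EvaluationAgent` 评估。下面是一个最小示意,假设沿用上一个选项卡中创建的 `eval_agent` 与 `device`:
+
+ ```python
+ # 假设 eval_agent、device 已按上一个选项卡定义
+ wrapped_pipe.move_to_device(device)
+ base_results = eval_agent.evaluate(wrapped_pipe)
+ wrapped_pipe.move_to_device("cpu")
+ ```
+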
171
+ </hfoption>
172
+ </hfoptions>
173
+
174
+ 现在您已经了解了如何优化和评估您的模型,可以开始使用 Pruna 来优化您自己的模型了。幸运的是,我们有许多示例来帮助您入门。
175
+
176
+ > [!TIP]
177
+ > 有关基准测试 Flux 的更多详细信息,请查看 [宣布 FLUX-Juiced:最快的图像生成端点(快 2.6 倍)!](https://huggingface.co/blog/PrunaAI/flux-fastest-image-generation-endpoint) 博客文章和 [InferBench](https://huggingface.co/spaces/PrunaAI/InferBench) 空间。
178
+
179
+ ## 参考
180
+
181
+ - [Pruna](https://github.com/pruna-ai/pruna)
182
+ - [Pruna 优化](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/configure.html#configure-algorithms)
183
+ - [Pruna 评估](https://docs.pruna.ai/en/stable/docs_pruna/user_manual/evaluate.html)
184
+ - [Pruna 教程](https://docs.pruna.ai/en/stable/docs_pruna/tutorials/index.html)
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/speed-memory-optims.md ADDED
@@ -0,0 +1,200 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--版权所有 2024 The HuggingFace Team。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版(“许可证”)授权;除非符合许可证,否则不得使用此文件。
4
+ 您可以在以下网址获取许可证副本:
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ 除非适用法律要求或书面同意,根据许可证分发的软件按“原样”分发,不附带任何明示或暗示的担保或条件。有关许可证的特定语言,请参阅许可证。
9
+ -->
10
+
11
+ # 编译和卸载量化模型
12
+
13
+ 优化模型通常涉及[推理速度](./fp16)和[内存使用](./memory)之间的权衡。例如,虽然[缓存](./cache)可以提高推理速度,但它也会增加内存消耗,因为它需要存储中间注意力层的输出。一种更平衡的优化策略结合了量化模型、[torch.compile](./fp16#torchcompile) 和各种[卸载方法](./memory#offloading)。
14
+
15
+ > [!TIP]
16
+ > 查看 [torch.compile](./fp16#torchcompile) 指南以了解更多关于编译以及如何在此处应用的信息。例如,区域编译可以显著减少编译时间,而不会放弃任何加速。
17
+
18
+ 对于图像生成,结合量化和[模型卸载](./memory#model-offloading)通常可以在质量、速度和内存之间提供最佳权衡。组卸载对于图像生成效果不佳,因为如果计算内核更快完成,通常不可能*完全*重叠数据传输。这会导致 CPU 和 GPU 之间的一些通信开销。
19
+
20
+ 对于视频生成,结合量化和[组卸载](./memory#group-offloading)往往更好,因为视频模型更受计算限制。
21
+
22
+ 下表提供了优化策略组合及其对 Flux 延迟和内存使用的影响的比较。
23
+
24
+ | 组合 | 延迟 (s) | 内存使用 (GB) |
25
+ |---|---|---|
26
+ | 量化 | 32.602 | 14.9453 |
27
+ | 量化, torch.compile | 25.847 | 14.9448 |
28
+ | 量化, torch.compile, 模型 CPU 卸载 | 32.312 | 12.2369 |
29
+ <small>这些结果是在 Flux 上使用 RTX 4090 进行基准测试的。transformer 和 text_encoder 组件已量化。如果您有兴趣评估自己的模型,请参考[基准测试脚本](https://gist.github.com/sayakpaul/0db9d8eeeb3d2a0e5ed7cf0d9ca19b7d)。</small>
30
+
31
+ 本指南将向您展示如何使用 [bitsandbytes](../quantization/bitsandbytes#torchcompile) 编译和卸载量化模型。确保您正在使用 [PyTorch nightly](https://pytorch.org/get-started/locally/) 和最新版本的 bitsandbytes。
32
+
33
+ ```bash
34
+ pip install -U bitsandbytes
35
+ ```
36
+
37
+ ## 量化和 torch.compile
38
+
39
+ 首先通过[量化](../quantization/overview)模型来减少存储所需的内存,并[编译](./fp16#torchcompile)它以加速推理。
40
+
41
+ 配置 [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `capture_dynamic_output_shape_ops = True` 以在编译 bitsandbytes 模型时处理动态输出。
42
+
43
+ ```py
44
+ import torch
45
+ from diffusers import DiffusionPipeline
46
+ from diffusers.quantizers import PipelineQuantizationConfig
47
+
48
+ torch._dynamo.config.capture_dynamic_output_shape_ops = True
49
+
50
+ # 量化
51
+ pipeline_quant_config = PipelineQuantizationConfig(
52
+ quant_backend="bitsandbytes_4bit",
53
+ quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
54
+ components_to_quantize=["transformer", "text_encoder_2"],
55
+ )
56
+ pipeline = DiffusionPipeline.from_pretrained(
57
+ "black-forest-labs/FLUX.1-dev",
58
+ quantization_config=pipeline_quant_config,
59
+ torch_dtype=torch.bfloat16,
60
+ ).to("cuda")
61
+
62
+ # 编译
63
+ pipeline.transformer.to(memory_format=torch.channels_last)
64
+ pipeline.transformer.compile(mode="max-autotune", fullgraph=True)
65
+ pipeline("""
66
+ cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California
67
+ highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain
68
+ """
69
+ ).images[0]
70
+ ```
71
+
72
+ ## 量化、torch.compile 和卸载
73
+
74
+ 除了量化和 torch.compile,如果您需要进一步减少内存使用,可以尝试卸载。卸载根据需要将各种层或模型组件从 CPU 移动到 GPU 进行计算。
75
+
76
+ 在卸载期间配置 [Dynamo](https://docs.pytorch.org/docs/stable/torch.compiler_dynamo_overview.html) `cache_size_limit` 以避免过多的重新编译,并设置 `capture_dynamic_output_shape_ops = True` 以在编译 bitsandbytes 模型时处理动态输出。
77
+
78
+ <hfoptions id="offloading">
79
+ <hfoption id="model CPU offloading">
80
+
81
+ [模型 CPU 卸载](./memory#model-offloading) 将单个管道组件(如 transformer 模型)在需要计算时移动到 GPU。否则,它会被卸载到 CPU。
82
+
83
+ ```py
84
+ import torch
85
+ from diffusers import DiffusionPipeline
86
+ from diffusers.quantizers import PipelineQuantizationConfig
87
+
88
+ torch._dynamo.config.cache_size_limit = 1000
89
+ torch._dynamo.config.capture_dynamic_output_shape_ops = True
90
+
91
+ # 量化
92
+ pipeline_quant_config = PipelineQuantizationConfig(
93
+ quant_backend="bitsandbytes_4bit",
94
+ quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
95
+ components_to_quantize=["transformer", "text_encoder_2"],
96
+ )
97
+ pipeline = DiffusionPipeline.from_pretrained(
98
+ "black-forest-labs/FLUX.1-dev",
99
+ quantization_config=pipeline_quant_config,
100
+ torch_dtype=torch.bfloat16,
101
+ ).to("cuda")
102
+
103
+ # 模型 CPU 卸载
104
+ pipeline.enable_model_cpu_offload()
105
+
106
+ # 编译
107
+ pipeline.transformer.compile()
108
+ pipeline(
109
+ "cinematic film still of a cat sipping a margarita in a pool in Palm Springs, California, highly detailed, high budget hollywood movie, cinemascope, moody, epic, gorgeous, film grain"
110
+ ).images[0]
111
+ ```
112
+
113
+ </hfoption>
114
+ <hfoption id="group offloading">
115
+
116
+ [组卸载](./memory#group-offloading) 将单个管道组件(如变换器模型)的内部层移动到 GPU 进行计算,并在不需要时将其卸载。同时,它使用 [CUDA 流](./memory#cuda-stream) 功能来预取下一层以执行。
117
+
118
+ 通过重叠计算和数据传输,它比模型 CPU 卸载更快,同时还能节省内存。
119
+
120
+ ```py
121
+ # pip install ftfy
122
+ import torch
123
+ from diffusers import AutoModel, DiffusionPipeline
124
+ from diffusers.hooks import apply_group_offloading
125
+ from diffusers.utils import export_to_video
126
+ from diffusers.quantizers import PipelineQuantizationConfig
127
+ from transformers import UMT5EncoderModel
128
+
129
+ torch._dynamo.config.cache_size_limit = 1000
130
+ torch._dynamo.config.capture_dynamic_output_shape_ops = True
131
+
132
+ # 量化
133
+ pipeline_quant_config = PipelineQuantizationConfig(
134
+ quant_backend="bitsandbytes_4bit",
135
+ quant_kwargs={"load_in_4bit": True, "bnb_4bit_quant_type": "nf4", "bnb_4bit_compute_dtype": torch.bfloat16},
136
+ components_to_quantize=["transformer", "text_encoder"],
137
+ )
138
+
139
+ text_encoder = UMT5EncoderModel.from_pretrained(
140
+ "Wan-AI/Wan2.1-T2V-14B-Diffusers", subfolder="text_encoder", torch_dtype=torch.bfloat16
141
+ )
142
+ pipeline = DiffusionPipeline.from_pretrained(
143
+ "Wan-AI/Wan2.1-T2V-14B-Diffusers",
144
+ quantization_config=pipeline_quant_config,
145
+ torch_dtype=torch.bfloat16,
146
+ ).to("cuda")
147
+
148
+ # 组卸载
149
+ onload_device = torch.device("cuda")
150
+ offload_device = torch.device("cpu")
151
+
152
+ pipeline.transformer.enable_group_offload(
153
+ onload_device=onload_device,
154
+ offload_device=offload_device,
155
+ offload_type="leaf_level",
156
+ use_stream=True,
157
+ non_blocking=True
158
+ )
159
+ pipeline.vae.enable_group_offload(
160
+ onload_device=onload_device,
161
+ offload_device=offload_device,
162
+ offload_type="leaf_level",
163
+ use_stream=True,
164
+ non_blocking=True
165
+ )
166
+ apply_group_offloading(
167
+ pipeline.text_encoder,
168
+ onload_device=onload_device,
169
+ offload_type="leaf_level",
170
+ use_stream=True,
171
+ non_blocking=True
172
+ )
173
+
174
+ # 编译
175
+ pipeline.transformer.compile()
176
+
177
+ prompt = """
178
+ The camera rushes from far to near in a low-angle shot,
179
+ revealing a white ferret on a log. It plays, leaps into the water, and emerges, as the camera zooms in
180
+ for a close-up. Water splashes berry bushes nearby, while moss, snow, and leaves blanket the ground.
181
+ Birch trees and a light blue sky frame the scene, with ferns in the foreground. Side lighting casts dynamic
182
+ shadows and warm highlights. Medium composition, front view, low angle, with depth of field.
183
+ """
184
+ negative_prompt = """
185
+ Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality,
186
+ low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured,
187
+ misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards
188
+ """
189
+
190
+ output = pipeline(
191
+ prompt=prompt,
192
+ negative_prompt=negative_prompt,
193
+ num_frames=81,
194
+ guidance_scale=5.0,
195
+ ).frames[0]
196
+ export_to_video(output, "output.mp4", fps=16)
197
+ ```
198
+
199
+ </hfoption>
200
+ </hfoptions>
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/tgate.md ADDED
@@ -0,0 +1,182 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # T-GATE
2
+
3
+ [T-GATE](https://github.com/HaozheLiu-ST/T-GATE/tree/main) 通过跳过交叉注意力计算一旦收敛,加速了 [Stable Diffusion](../api/pipelines/stable_diffusion/overview)、[PixArt](../api/pipelines/pixart) 和 [Latency Consistency Model](../api/pipelines/latent_consistency_models.md) 管道的推理。此方法不需要任何额外训练,可以将推理速度提高 10-50%。T-GATE 还与 [DeepCache](./deepcache) 等其他优化方法兼容。
4
+
5
+ 开始之前,请确保安装 T-GATE。
6
+
7
+ ```bash
8
+ pip install tgate
9
+ pip install -U torch diffusers transformers accelerate DeepCache
10
+ ```
11
+
12
+ 要使用 T-GATE 与管道,您需要使用其对应的加载器。
13
+
14
+ | 管道 | T-GATE 加载器 |
15
+ |---|---|
16
+ | PixArt | TgatePixArtLoader |
17
+ | Stable Diffusion XL | TgateSDXLLoader |
18
+ | Stable Diffusion XL + DeepCache | TgateSDXLDeepCacheLoader |
19
+ | Stable Diffusion | TgateSDLoader |
20
+ | Stable Diffusion + DeepCache | TgateSDDeepCacheLoader |
21
+
22
+ 接下来,创建一个 `TgateLoader`,包含管道、门限步骤(停止计算交叉注意力的时间步)和推理步骤数。然后在管道上调用 `tgate` 方法,提供提示、门限步骤和推理步骤数。
23
+
24
+ 让我们看看如何为几个不同的管道启用此功能。
25
+
26
+ <hfoptions id="pipelines">
27
+ <hfoption id="PixArt">
28
+
29
+ 使用 T-GATE 加速 `PixArtAlphaPipeline`:
30
+
31
+ ```py
32
+ import torch
33
+ from diffusers import PixArtAlphaPipeline
34
+ from tgate import TgatePixArtLoader
35
+
36
+ pipe = PixArtAlphaPipeline.from_pretrained("PixArt-alpha/PixArt-XL-2-1024-MS", torch_dtype=torch.float16)
37
+
38
+ gate_step = 8
39
+ inference_step = 25
40
+ pipe = TgatePixArtLoader(
41
+ pipe,
42
+ gate_step=gate_step,
43
+ num_inference_steps=inference_step,
44
+ ).to("cuda")
45
+
46
+ image = pipe.tgate(
47
+ "An alpaca made of colorful building blocks, cyberpunk.",
48
+ gate_step=gate_step,
49
+ num_inference_steps=inference_step,
50
+ ).images[0]
51
+ ```
52
+ </hfoption>
53
+ <hfoption id="Stable Diffusion XL">
54
+
55
+ 使用 T-GATE 加速 `StableDiffusionXLPipeline`:
56
+
57
+ ```py
58
+ import torch
59
+ from diffusers import StableDiffusionXLPipeline
60
+ from diffusers import DPMSolverMultistepScheduler
61
+ from tgate import TgateSDXLLoader
62
+
63
+ pipe = StableDiffusionXLPipeline.from_pretrained(
64
+ "stabilityai/stable-diffusion-xl-base-1.0",
65
+ torch_dtype=torch.float16,
66
+ variant="fp16",
67
+ use_safetensors=True,
68
+ )
69
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
70
+
71
+ gate_step = 10
72
+ inference_step = 25
73
+ pipe = TgateSDXLLoader(
74
+ pipe,
75
+ gate_step=gate_step,
76
+ num_inference_steps=inference_step,
77
+ ).to("cuda")
78
+
79
+ image = pipe.tgate(
80
+ "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
81
+ gate_step=gate_step,
82
+ num_inference_steps=inference_step
83
+ ).images[0]
84
+ ```
85
+ </hfoption>
86
+ <hfoption id="StableDiffusionXL with DeepCache">
87
+
88
+ 使用 [DeepCache](https://github.com/horseee/DeepCache) 和 T-GATE 加速 `StableDiffusionXLPipeline`:
90
90
+
91
+ ```py
92
+ import torch
93
+ from diffusers import StableDiffusionXLPipeline
94
+ from diffusers import DPMSolverMultistepScheduler
95
+ from tgate import TgateSDXLDeepCacheLoader
96
+
97
+ pipe = StableDiffusionXLPipeline.from_pretrained(
98
+ "stabilityai/stable-diffusion-xl-base-1.0",
99
+ torch_dtype=torch.float16,
100
+ variant="fp16",
101
+ use_safetensors=True,
102
+ )
103
+ pipe.scheduler = DPMSolverMultistepScheduler.from_config(pipe.scheduler.config)
104
+
105
+ gate_step = 10
106
+ inference_step = 25
107
+ pipe = TgateSDXLDeepCacheLoader(
108
+ pipe,
109
+ cache_interval=3,
110
+ cache_branch_id=0,
111
+ ).to("cuda")
112
+
113
+ image = pipe.tgate(
114
+ "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
115
+ gate_step=gate_step,
116
+ num_inference_steps=inference_step
117
+ ).images[0]
118
+ ```
119
+ </hfoption>
120
+ <hfoption id="Latent Consistency Model">
121
+
122
+ 使用 T-GATE 加速 `latent-consistency/lcm-sdxl`:
123
+
124
+ ```py
125
+ import torch
126
+ from diffusers import StableDiffusionXLPipeline
127
+ from diffusers import UNet2DConditionModel, LCMScheduler
128
+ from diffusers import DPMSolverMultistepScheduler
129
+ from tgate import TgateSDXLLoader
130
+
131
+ unet = UNet2DConditionModel.from_pretrained(
132
+ "latent-consistency/lcm-sdxl",
133
+ torch_dtype=torch.float16,
134
+ variant="fp16",
135
+ )
136
+ pipe = StableDiffusionXLPipeline.from_pretrained(
137
+ "stabilityai/stable-diffusion-xl-base-1.0",
138
+ unet=unet,
139
+ torch_dtype=torch.float16,
140
+ variant="fp16",
141
+ )
142
+ pipe.scheduler = LCMScheduler.from_config(pipe.scheduler.config)
143
+
144
+ gate_step = 1
145
+ inference_step = 4
146
+ pipe = TgateSDXLLoader(
147
+ pipe,
148
+ gate_step=gate_step,
149
+ num_inference_steps=inference_step,
150
+ lcm=True
151
+ ).to("cuda")
152
+
153
+ image = pipe.tgate(
154
+ "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
155
+ gate_step=gate_step,
156
+ num_inference_steps=inference_step
157
+ ).images[0]
158
+ ```
159
+ </hfoption>
160
+ </hfoptions>
161
+
162
+ T-GATE 还支持 [`StableDiffusionPipeline`] 和 [PixArt-alpha/PixArt-LCM-XL-2-1024-MS](https://hf.co/PixArt-alpha/PixArt-LCM-XL-2-1024-MS)。
163
+
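+ 例如,可以按上文相同的模式使用 `TgateSDLoader` 加速 `StableDiffusionPipeline`。下面是一个最小示意,假设该加载器的参数与 `TgateSDXLLoader` 一致(请以 T-GATE 仓库文档为准):
+
+ ```py
+ import torch
+ from diffusers import StableDiffusionPipeline
+ from tgate import TgateSDLoader
+
+ pipe = StableDiffusionPipeline.from_pretrained(
+     "stable-diffusion-v1-5/stable-diffusion-v1-5",
+     torch_dtype=torch.float16,
+ )
+
+ gate_step = 10
+ inference_step = 25
+ pipe = TgateSDLoader(
+     pipe,
+     gate_step=gate_step,
+     num_inference_steps=inference_step,
+ ).to("cuda")
+
+ image = pipe.tgate(
+     "Astronaut in a jungle, cold color palette, muted colors, detailed, 8k.",
+     gate_step=gate_step,
+     num_inference_steps=inference_step,
+ ).images[0]
+ ```
+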
164
+ ## 基准测试
+
165
+ | 模型 | MACs | 参数 | 延迟 | 零样本 10K-FID on MS-COCO |
166
+ |-----------------------|----------|-----------|---------|---------------------------|
167
+ | SD-1.5 | 16.938T | 859.520M | 7.032s | 23.927 |
168
+ | SD-1.5 w/ T-GATE | 9.875T | 815.557M | 4.313s | 20.789 |
169
+ | SD-2.1 | 38.041T | 865.785M | 16.121s | 22.609 |
170
+ | SD-2.1 w/ T-GATE | 22.208T | 815.433 M | 9.878s | 19.940 |
171
+ | SD-XL | 149.438T | 2.570B | 53.187s | 24.628 |
172
+ | SD-XL w/ T-GATE | 84.438T | 2.024B | 27.932s | 22.738 |
173
+ | Pixart-Alpha | 107.031T | 611.350M | 61.502s | 38.669 |
174
+ | Pixart-Alpha w/ T-GATE | 65.318T | 462.585M | 37.867s | 35.825 |
175
+ | DeepCache (SD-XL) | 57.888T | - | 19.931s | 23.755 |
176
+ | DeepCache 配合 T-GATE | 43.868T | - | 14.666秒 | 23.999 |
177
+ | LCM (SD-XL) | 11.955T | 2.570B | 3.805秒 | 25.044 |
178
+ | LCM 配合 T-GATE | 11.171T | 2.024B | 3.533秒 | 25.028 |
179
+ | LCM (Pixart-Alpha) | 8.563T | 611.350M | 4.733秒 | 36.086 |
180
+ | LCM 配合 T-GATE | 7.623T | 462.585M | 4.543秒 | 37.048 |
181
+
182
+ 延迟测试基于 NVIDIA 1080TI,MACs 和 Params 使用 [calflops](https://github.com/MrYxJ/calculate-flops.pytorch) 计算,FID 使用 [PytorchFID](https://github.com/mseitzer/pytorch-fid) 计算。
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/tome.md ADDED
@@ -0,0 +1,90 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--版权所有 2025 The HuggingFace Team。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版(“许可证”)授权;除非遵守许可证,否则不得使用此文件。
4
+ 您可以在以下网址获取许可证副本:
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ 除非适用法律要求或书面同意,根据许可证分发的软件按“原样”分发,不附带任何明示或暗示的担保或条件。请参阅许可证以了解具体的语言管理权限和限制。
9
+ -->
10
+
11
+ # 令牌合并
12
+
13
+ [令牌合并](https://huggingface.co/papers/2303.17604)(ToMe)在基于 Transformer 的网络的前向传递中逐步合并冗余令牌/补丁,这可以加速 [`StableDiffusionPipeline`] 的推理延迟。
14
+
15
+ 从 `pip` 安装 ToMe:
16
+
17
+ ```bash
18
+ pip install tomesd
19
+ ```
20
+
21
+ 您可以使用 [`tomesd`](https://github.com/dbolya/tomesd) 库中的 [`apply_patch`](https://github.com/dbolya/tomesd?tab=readme-ov-file#usage) 函数:
22
+
23
+ ```diff
24
+ from diffusers import StableDiffusionPipeline
25
+ import torch
26
+ import tomesd
27
+
28
+ pipeline = StableDiffusionPipeline.from_pretrained(
29
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True,
30
+ ).to("cuda")
31
+ + tomesd.apply_patch(pipeline, ratio=0.5)
32
+
33
+ image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
34
+ ```
35
+
36
+ `apply_patch` 函数公开了多个[参数](https://github.com/dbolya/tomesd#usage),以帮助在管道推理速度和生成令牌的质量之间取得平衡。最重要的参数是 `ratio`,它控制在前向传递期间合并的令牌数量。
37
+
38
+ 如[论文](https://huggingface.co/papers/2303.17604)中所述,ToMe 可以在显著提升推理速度的同时,很大程度上保留生成图像的质量。通过增加 `ratio`,您可以进一步加速推理,但代价是图像质量有所下降。
39
+
40
+ 为了测试生成图像的质量,我们从 [Parti Prompts](https://parti.research.google/) 中采样了一些提示,并使用 [`StableDiffusionPipeline`] 进行了推理,设置如下:
41
+
42
+ <div class="flex justify-center">
43
+ <img src="https://huggingface.co/datasets/diffusers/docs-images/resolve/main/tome/tome_samples.png">
44
+ </div>
45
+
46
+ 我们没有注意到生成样本的质量有任何显著下降,您可以在此 [WandB 报告](https://wandb.ai/sayakpaul/tomesd-results/runs/23j4bj3i?workspace=)中查看生成的样本。如果您有兴趣重现此实验,请使用此[脚本](https://gist.github.com/sayakpaul/8cac98d7f22399085a060992f411ecbd)。
47
+
48
+ ## 基准测试
49
+
50
+ 我们还在启用 [xFormers](https://huggingface.co/docs/diffusers/optimization/xformers) 的情况下,对 [`StableDiffusionPipeline`] 上 `tomesd` 的影响进行了基准测试,涵盖了多个图像分辨率。结果是在以下开发环境中的 A100 和 V100 GPU 上获得的:
51
52
+
53
+ ```bash
54
+ - `diffusers` 版本:0.15.1
55
+ - Python 版本:3.8.16
56
+ - PyTorch 版本(GPU?):1.13.1+cu116 (True)
57
+ - Huggingface_hub 版本:0.13.2
58
+ - Transformers 版本:4.27.2
59
+ - Accelerate 版本:0.18.0
60
+ - xFormers 版本:0.0.16
61
+ - tomesd 版本:0.1.2
62
+ ```
63
+
64
+ 要重现此基准测试,请随意使用此[脚本](https://gist.github.com/sayakpaul/27aec6bca7eb7b0e0aa4112205850335)。结果以秒为单位报告,并且在适用的情况下,我们报告了使用ToMe和ToMe + xFormers时相对于原始管道的加速百分比。
65
+
66
+ | **GPU** | **分辨率** | **批处理大小** | **原始** | **ToMe** | **ToMe + xFormers** |
67
+ |----------|----------------|----------------|-------------|----------------|---------------------|
68
+ | **A100** | 512 | 10 | 6.88 | 5.26 (+23.55%) | 4.69 (+31.83%) |
69
+ | | 768 | 10 | OOM | 14.71 | 11 |
70
+ | | | 8 | OOM | 11.56 | 8.84 |
71
+ | | | 4 | OOM | 5.98 | 4.66 |
72
+ | | | 2 | 4.99 | 3.24 (+35.07%) | 2.1 (+37.88%) |
73
+ | | | 1 | 3.29 | 2.24 (+31.91%) | 2.03 (+38.3%) |
74
+ | | 1024 | 10 | OOM | OOM | OOM |
75
+ | | | 8 | OOM | OOM | OOM |
76
+ | | | 4 | OOM | 12.51 | 9.09 |
77
+ | | | 2 | OOM | 6.52 | 4.96 |
78
+ | | | 1 | 6.4 | 3.61 (+43.59%) | 2.81 (+56.09%) |
79
+ | **V100** | 512 | 10 | OOM | 10.03 | 9.29 |
80
+ | | | 8 | OOM | 8.05 | 7.47 |
81
+ | | | 4 | 5.7 | 4.3 (+24.56%) | 3.98 (+30.18%) |
82
+ | | | 2 | 3.14 | 2.43 (+22.61%) | 2.27 (+27.71%) |
83
+ | | | 1 | 1.88 | 1.57 (+16.49%) | 1.57 (+16.49%) |
84
+ | | 768 | 10 | OOM | OOM | 23.67 |
85
+ | | | 8 | OOM | OOM | 18.81 |
86
+ | | | 4 | OOM | 11.81 | 9.7 |
87
+ | | | 2 | OOM | 6.27 | 5.2 |
88
+ | | | 1 | 5.43 | 3.38 (+37.75%) | 2.82 (+48.07%) |
89
+ | | 1024 | 10 | OOM | | |
90
+
+ 如上表所示,`tomesd` 带来的加速效果在更大的图像分辨率下变得更加明显。有趣的是,使用 `tomesd` 可以在更高分辨率如 1024x1024 上运行管道。您可能还可以通过 [`torch.compile`](fp16#torchcompile) 进一步加速推理。
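+
+ 下面是一个将 ToMe 与 `torch.compile` 结合使用的最小示意(需要 PyTorch 2.0 及以上;首次调用会触发编译,之后的调用才能体现加速):
+
+ ```python
+ import torch
+ import tomesd
+ from diffusers import StableDiffusionPipeline
+
+ pipeline = StableDiffusionPipeline.from_pretrained(
+     "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True,
+ ).to("cuda")
+
+ tomesd.apply_patch(pipeline, ratio=0.5)
+ pipeline.unet = torch.compile(pipeline.unet)
+
+ image = pipeline("a photo of an astronaut riding a horse on mars").images[0]
+ ```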
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/xdit.md ADDED
@@ -0,0 +1,119 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # xDiT
2
+
3
+ [xDiT](https://github.com/xdit-project/xDiT) 是一个推理引擎,专为大规模并行部署扩散变换器(DiTs)而设计。xDiT 提供了一套用于扩散模型的高效并行方法,以及 GPU 内核加速。
4
+
5
+ xDiT 支持四种并行方法,包括[统一序列并行](https://huggingface.co/papers/2405.07719)、[PipeFusion](https://huggingface.co/papers/2405.14430)、CFG 并行和数据并行。xDiT 中的这四种并行方法可以以混合方式配置,优化通信模式以最适合底层网络硬件。
6
+
7
+ 与并行化正交的优化侧重于加速单个 GPU 的性能。除了利用知名的注意力优化库外,我们还利用编译加速技术,如 torch.compile 和 onediff。
8
+
9
+ xDiT 的概述如下所示。
10
+
11
+ <div class="flex justify-center">
12
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/methods/xdit_overview.png">
13
+ </div>
14
+ 您可以使用以下命令安装 xDiT:
15
+
16
+ ```bash
17
+ pip install xfuser
18
+ ```
19
+
20
+ 以下是一个使用 xDiT 加速 Diffusers 模型推理的示例。
21
+
22
+ ```diff
23
+ import torch
24
+ from diffusers import StableDiffusion3Pipeline
25
+
26
+ from xfuser import xFuserArgs, xDiTParallel
27
+ from xfuser.config import FlexibleArgumentParser
28
+ from xfuser.core.distributed import get_world_group
29
+
30
+ def main():
31
+ + parser = FlexibleArgumentParser(description="xFuser Arguments")
32
+ + args = xFuserArgs.add_cli_args(parser).parse_args()
33
+ + engine_args = xFuserArgs.from_cli_args(args)
34
+ + engine_config, input_config = engine_args.create_config()
35
+
36
+ local_rank = get_world_group().local_rank
37
+ pipe = StableDiffusion3Pipeline.from_pretrained(
38
+ pretrained_model_name_or_path=engine_config.model_config.model,
39
+ torch_dtype=torch.float16,
40
+ ).to(f"cuda:{local_rank}")
41
+
42
+ # 在这里对管道进行任何操作
43
+
44
+ + pipe = xDiTParallel(pipe, engine_config, input_config)
45
+
46
+ pipe(
47
+ height=input_config.height,
48
+ width=input_config.height,
49
+ prompt=input_config.prompt,
50
+ num_inference_steps=input_config.num_inference_steps,
51
+ output_type=input_config.output_type,
52
+ generator=torch.Generator(device="cuda").manual_seed(input_config.seed),
53
+ )
54
+
55
+ + if input_config.output_type == "pil":
56
+ + pipe.save("results", "stable_diffusion_3")
57
+
58
+ if __name__ == "__main__":
59
+ main()
60
+ ```
61
+
62
+ 如您所见,我们只需要使用 xDiT 中的 xFuserArgs 来获取配置参数,并将这些参数与来自 Diffusers 库的管道对象一起传递给 xDiTParallel,即可完成对 Diffusers 中特定管道的并行化。
63
+
64
+ xDiT 运行时参数可以在命令行中使用 `-h` 查看,您可以参考此[使用](https://github.com/xdit-project/xDiT?tab=readme-ov-file#2-usage)示例以获取更多详细信息。
65
+ ils。
66
+
67
+ xDiT 需要使用 torchrun 启动,以支持其多节点、多 GPU 并行能力。例如,以下命令可用于 8-GPU 并行推理:
68
+
69
+ ```bash
70
+ torchrun --nproc_per_node=8 ./inference.py --model models/FLUX.1-dev --data_parallel_degree 2 --ulysses_degree 2 --ring_degree 2 --prompt "A snowy mountain" "A small dog" --num_inference_steps 50
71
+ ```
72
+
73
+ ## 支持的模型
74
+
75
+ 在 xDiT 中支持 Diffusers 模型的一个子集,例如 Flux.1、Stable Diffusion 3 等。最新支持的模型可以在[这里](https://github.com/xdit-project/xDiT?tab=readme-ov-file#-supported-dits)找到。
76
+
77
+ ## 基准测试
78
+ 我们在不同机器上测试了各种模型,以下是一些基准数据。
79
+
80
+ ### Flux.1-schnell
81
+ <div class="flex justify-center">
82
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/flux/Flux-2k-L40.png">
83
+ </div>
84
+
85
+ <div class="flex justify-center">
86
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/flux/Flux-2K-A100.png">
87
+ </div>
88
+
89
+ ### Stable Diffusion 3
90
+ <div class="flex justify-center">
91
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/sd3/L40-SD3.png">
92
+ </div>
93
+
94
+ <div class="flex justify-center">
95
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/sd3/A100-SD3.png">
96
+ </div>
97
+
98
+ ### HunyuanDiT
99
+ <div class="flex justify-center">
100
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/hunuyuandit/L40-HunyuanDiT.png">
101
+ </div>
102
+
103
+ <div class="flex justify-center">
104
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/hunuyuandit/V100-HunyuanDiT.png">
105
+ </div>
106
+
107
+ <div class="flex justify-center">
108
+ <img src="https://huggingface.co/datasets/xDiT/documentation-images/resolve/main/performance/hunuyuandit/T4-HunyuanDiT.png">
109
+ </div>
110
+
111
+ 更详细的性能指标可以在我们的 [GitHub 页面](https://github.com/xdit-project/xDiT?tab=readme-ov-file#perf) 上找到。
112
+
113
+ ## 参考文献
114
+
115
+ [xDiT-project](https://github.com/xdit-project/xDiT)
116
+
117
+ [USP: A Unified Sequence Parallelism Approach for Long Context Generative AI](https://huggingface.co/papers/2405.07719)
118
+
119
+ [PipeFusion: Displaced Patch Pipeline Parallelism for Inference of Diffusion Transformer Models](https://huggingface.co/papers/2405.14430)
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/optimization/xformers.md ADDED
@@ -0,0 +1,32 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--版权归2025年HuggingFace团队所有。保留所有权利。
2
+
3
+ 根据Apache许可证2.0版("许可证")授权;除非符合许可证要求,否则不得使用本文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,本软件按"原样"分发,不附带任何明示或暗示的担保或条件。详见许可证中规定的特定语言及限制条款。
8
+ -->
9
+
10
+ # xFormers
11
+
12
+ 我们推荐在推理和训练过程中使用[xFormers](https://github.com/facebookresearch/xformers)。在我们的测试中,其对注意力模块的优化能同时提升运行速度并降低内存消耗。
13
+
14
+ 通过`pip`安装xFormers:
15
+
16
+ ```bash
17
+ pip install xformers
18
+ ```
19
+
20
+ <Tip>
21
+
22
+ xFormers的`pip`安装包需要最新版本的PyTorch。如需使用旧版PyTorch,建议[从源码安装xFormers](https://github.com/facebookresearch/xformers#installing-xformers)。
23
+
24
+ </Tip>
25
+
26
+ 安装完成后,您可调用`enable_xformers_memory_efficient_attention()`来实现更快的推理速度和更低的内存占用,具体用法参见[此章节](memory#memory-efficient-attention)。
27
+
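+ 一个最小调用示意(模型名仅作示例):
+
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline
+
+ pipeline = DiffusionPipeline.from_pretrained(
+     "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16
+ ).to("cuda")
+
+ pipeline.enable_xformers_memory_efficient_attention()
+
+ image = pipeline("an astronaut riding a horse on the moon").images[0]
+
+ # 如需关闭,可调用:
+ # pipeline.disable_xformers_memory_efficient_attention()
+ ```
+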
28
+ <Tip warning={true}>
29
+
30
+ 根据[此问题](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212)反馈,xFormers `v0.0.16`版本在某些GPU上无法用于训练(微调或DreamBooth)。如遇此问题,请按照该issue评论区指引安装开发版本。
31
+
32
+ </Tip>
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/adapt_a_model.md ADDED
@@ -0,0 +1,47 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ # 将模型适配至新任务
2
+
3
+ 许多扩散系统共享相同的组件架构,这使得您能够将针对某一任务预训练的模型调整适配至完全不同的新任务。
4
+
5
+ 本指南将展示如何通过初始化并修改预训练 [`UNet2DConditionModel`] 的架构,将文生图预训练模型改造为图像修复(inpainting)模型。
6
+
7
+ ## 配置 UNet2DConditionModel 参数
8
+
9
+ 默认情况下,[`UNet2DConditionModel`] 的[输入样本](https://huggingface.co/docs/diffusers/v0.16.0/en/api/models#diffusers.UNet2DConditionModel.in_channels)接受4个通道。例如加载 [`stable-diffusion-v1-5/stable-diffusion-v1-5`](https://huggingface.co/stable-diffusion-v1-5/stable-diffusion-v1-5) 这样的文生图预训练模型,查看其 `in_channels` 参数值:
10
+
11
+ ```python
12
+ from diffusers import StableDiffusionPipeline
13
+
14
+ pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", use_safetensors=True)
15
+ pipeline.unet.config["in_channels"]
16
+ 4
17
+ ```
18
+
19
+ 而图像修复任务需要输入样本具有9个通道。您可以在 [`runwayml/stable-diffusion-inpainting`](https://huggingface.co/runwayml/stable-diffusion-inpainting) 这样的预训练修复模型中验证此参数:
20
+
21
+ ```python
22
+ from diffusers import StableDiffusionPipeline
23
+
24
+ pipeline = StableDiffusionPipeline.from_pretrained("runwayml/stable-diffusion-inpainting", use_safetensors=True)
25
+ pipeline.unet.config["in_channels"]
26
+ 9
27
+ ```
28
+
29
+ 要将文生图模型改造为修复模型,您需要将 `in_channels` 参数从4调整为9。
30
+
31
+ 初始化一个加载了文生图预训练权重的 [`UNet2DConditionModel`],并将 `in_channels` 设为9。由于输入通道数变化导致张量形状改变,需要设置 `ignore_mismatched_sizes=True` 和 `low_cpu_mem_usage=False` 来避免尺寸不匹配错误。
32
+
33
+ ```python
34
+ from diffusers import AutoModel
35
+
36
+ model_id = "stable-diffusion-v1-5/stable-diffusion-v1-5"
37
+ unet = AutoModel.from_pretrained(
38
+ model_id,
39
+ subfolder="unet",
40
+ in_channels=9,
41
+ low_cpu_mem_usage=False,
42
+ ignore_mismatched_sizes=True,
43
+ use_safetensors=True,
44
+ )
45
+ ```
46
+
47
+ 此时文生图模型的其他组件权重仍保持预训练状态,但UNet的输入卷积层权重(`conv_in.weight`)会随机初始化。由于这一关键变化,必须对模型进行修复任务的微调,否则模型将仅会输出噪声。
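+
+ 可以用下面的最小检查确认输入卷积层的形状已经更新(权重形状约定为 `[out_channels, in_channels, kernel, kernel]`,其中 `out_channels` 取决于该 UNet 的配置,此处的数值仅作示意):
+
+ ```python
+ print(unet.config["in_channels"])
+ # 9
+ print(unet.conv_in.weight.shape)
+ # 预期类似于:torch.Size([320, 9, 3, 3])
+ ```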
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/controlnet.md ADDED
@@ -0,0 +1,366 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
+ the License. You may obtain a copy of the License at
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
+ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
+ specific language governing permissions and limitations under the License.
11
+ -->
12
+
13
+ # ControlNet
14
+
15
+ [ControlNet](https://hf.co/papers/2302.05543) 是一种基于预训练模型的适配器架构。它通过额外输入的条件图像(如边缘检测图、深度图、人体姿态图等),实现对生成图像的精细化控制。
16
+
17
+ 在显存有限的GPU上训练时,建议启用训练命令中的 `gradient_checkpointing`(梯度检查点)、`gradient_accumulation_steps`(梯度累积步数)和 `mixed_precision`(混合精度)参数。还可使用 [xFormers](../optimization/xformers) 的内存高效注意力机制进一步降低显存占用。虽然JAX/Flax训练支持在TPU和GPU上高效运行,但不支持梯度检查点和xFormers。若需通过Flax加速训练,建议使用显存大于30GB的GPU。
18
+
19
+ 本指南将解析 [train_controlnet.py](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py) 训练脚本,帮助您理解其逻辑并适配自定义需求。
20
+
21
+ 运行脚本前,请确保从源码安装库:
22
+
23
+ ```bash
24
+ git clone https://github.com/huggingface/diffusers
25
+ cd diffusers
26
+ pip install .
27
+ ```
28
+
29
+ 然后进入包含训练脚本的示例目录,安装所需依赖:
30
+
31
+ <hfoptions id="installation">
32
+ <hfoption id="PyTorch">
33
+ ```bash
34
+ cd examples/controlnet
35
+ pip install -r requirements.txt
36
+ ```
37
+ </hfoption>
38
+ <hfoption id="Flax">
39
+
40
+ 若可访问TPU设备,Flax训练脚本将运行得更快!以下是在 [Google Cloud TPU VM](https://cloud.google.com/tpu/docs/run-calculation-jax) 上的配置流程。创建单个TPU v4-8虚拟机并连接:
41
+
42
+ ```bash
43
+ ZONE=us-central2-b
44
+ TPU_TYPE=v4-8
45
+ VM_NAME=hg_flax
46
+
47
+ gcloud alpha compute tpus tpu-vm create $VM_NAME \
48
+ --zone $ZONE \
49
+ --accelerator-type $TPU_TYPE \
50
+ --version tpu-vm-v4-base
51
+
52
+ gcloud alpha compute tpus tpu-vm ssh $VM_NAME --zone $ZONE -- \
53
+ ```
54
+
55
+ 安装JAX 0.4.5:
56
+
57
+ ```bash
58
+ pip install "jax[tpu]==0.4.5" -f https://storage.googleapis.com/jax-releases/libtpu_releases.html
59
+ ```
60
+
61
+ 然后安装Flax脚本的依赖:
62
+
63
+ ```bash
64
+ cd examples/controlnet
65
+ pip install -r requirements_flax.txt
66
+ ```
67
+
68
+ </hfoption>
69
+ </hfoptions>
70
+
71
+ <Tip>
72
+
73
+ 🤗 Accelerate 是一个支持多GPU/TPU训练和混合精度的库,它能根据硬件环境自动配置训练方案。参阅 🤗 Accelerate [快速入门](https://huggingface.co/docs/accelerate/quicktour) 了解更多。
74
+
75
+ </Tip>
76
+
77
+ 初始化🤗 Accelerate环境:
78
+
79
+ ```bash
80
+ accelerate config
81
+ ```
82
+
83
+ 若要创建默认配置(不进行交互式选择):
84
+
85
+ ```bash
86
+ accelerate config default
87
+ ```
88
+
89
+ 若环境不支持交互式shell(如notebook),可使用:
90
+
91
+ ```py
92
+ from accelerate.utils import write_basic_config
93
+
94
+ write_basic_config()
95
+ ```
96
+
97
+ 最后,如需训练自定义数据集,请参阅 [创建训练数据集](create_dataset) 指南了解数据准备方法。
98
+
99
+ <Tip>
100
+
101
+ 下文重点解析脚本中的关键模块,但不会覆盖所有实现细节。如需深入了解,建议直接阅读 [脚本源码](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet.py),如有疑问欢迎反馈。
102
+
103
+ </Tip>
104
+
105
+ ## 脚本参数
106
+
107
+ 训练脚本提供了丰富的可配置参数,所有参数及其说明详见 [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L231) 函数。虽然该函数已为每个参数提供默认值(如训练批大小、学习率等),但您可以通过命令行参数覆盖这些默认值。
108
+
109
+ 例如,使用fp16混合精度加速训练, 可使用`--mixed_precision`参数
110
+
111
+ ```bash
112
+ accelerate launch train_controlnet.py \
113
+ --mixed_precision="fp16"
114
+ ```
115
+
116
+ 基础参数说明可参考 [文生图](text2image#script-parameters) 训练指南,此处重点介绍ControlNet相关参数:
117
+
118
+ - `--max_train_samples`: 训练样本数量,减少该值可加快训练,但对超大数据集需配合 `--streaming` 参数使用
119
+ - `--gradient_accumulation_steps`: 梯度累积步数,通过分步计算实现显存受限情况下的更大批次训练
120
+
121
+ ### Min-SNR加权策略
122
+
123
+ [Min-SNR](https://huggingface.co/papers/2303.09556) 加权策略通过重新平衡损失函数加速模型收敛。虽然训练脚本支持预测 `epsilon`(噪声)或 `v_prediction`,但Min-SNR对两种预测类型均兼容。该策略仅适用于PyTorch版本,Flax训练脚本暂不支持。
124
+
125
+ 推荐值设为5.0:
126
+
127
+ ```bash
128
+ accelerate launch train_controlnet.py \
129
+ --snr_gamma=5.0
130
+ ```
131
+
132
+ ## 训��脚本
133
+
134
+ 与参数说明类似,训练流程的通用解析可参考 [文生图](text2image#training-script) 指南。此处重点分析ControlNet特有的实现。
135
+
136
+ 脚本中的 [`make_train_dataset`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L582) 函数负责数据预处理,除常规的文本标注分词和图像变换外,还包含条件图像的特效处理:
137
+
138
+ <Tip>
139
+
140
+ 在TPU上流式加载数据集时,🤗 Datasets库可能成为性能瓶颈(因其未针对图像数据优化)。建议考虑 [WebDataset](https://webdataset.github.io/webdataset/)、[TorchData](https://github.com/pytorch/data) 或 [TensorFlow Datasets](https://www.tensorflow.org/datasets/tfless_tfds) 等高效数据格式。
141
+
142
+ </Tip>
143
+
144
+ ```py
145
+ conditioning_image_transforms = transforms.Compose(
146
+ [
147
+ transforms.Resize(args.resolution, interpolation=transforms.InterpolationMode.BILINEAR),
148
+ transforms.CenterCrop(args.resolution),
149
+ transforms.ToTensor(),
150
+ ]
151
+ )
152
+ ```
153
+
154
+ 在 [`main()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L713) 函数中,代码会加载分词器、文本编码器、调度器和模型。此处也是ControlNet模型的加载点(支持从现有权重加载或从UNet随机初始化):
155
+
156
+ ```py
157
+ if args.controlnet_model_name_or_path:
158
+ logger.info("Loading existing controlnet weights")
159
+ controlnet = ControlNetModel.from_pretrained(args.controlnet_model_name_or_path)
160
+ else:
161
+ logger.info("Initializing controlnet weights from unet")
162
+ controlnet = ControlNetModel.from_unet(unet)
163
+ ```
164
+
165
+ [优化器](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L871) 专门针对ControlNet参数进行更新:
166
+
167
+ ```py
168
+ params_to_optimize = controlnet.parameters()
169
+ optimizer = optimizer_class(
170
+ params_to_optimize,
171
+ lr=args.learning_rate,
172
+ betas=(args.adam_beta1, args.adam_beta2),
173
+ weight_decay=args.adam_weight_decay,
174
+ eps=args.adam_epsilon,
175
+ )
176
+ ```
177
+
178
+ 在 [训练循环](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/controlnet/train_controlnet.py#L943) 中,条件文本嵌入和图像被输入到ControlNet的下采样和中层模块:
179
+
180
+ ```py
181
+ encoder_hidden_states = text_encoder(batch["input_ids"])[0]
182
+ controlnet_image = batch["conditioning_pixel_values"].to(dtype=weight_dtype)
183
+
184
+ down_block_res_samples, mid_block_res_sample = controlnet(
185
+ noisy_latents,
186
+ timesteps,
187
+ encoder_hidden_states=encoder_hidden_states,
188
+ controlnet_cond=controlnet_image,
189
+ return_dict=False,
190
+ )
191
+ ```
192
+
193
+ 若想深入理解训练循环机制,可参阅 [理解管道、模型与调度器](../using-diffusers/write_own_pipeline) 教程,该教程详细解析了去噪过程的基本原理。
194
+
195
+ ## 启动训练
196
+
197
+ 现在可以启动训练脚本了!🚀
198
+
199
+ 本指南使用 [fusing/fill50k](https://huggingface.co/datasets/fusing/fill50k) 数据集,当然您也可以按照 [创建训练数据集](create_dataset) 指南准备自定义数据。
200
+
201
+ 设置环境变量 `MODEL_NAME` 为Hub模型ID或本地路径,`OUTPUT_DIR` 为模型保存路径。
202
+
203
+ 下载训练用的条件图像:
204
+
205
+ ```bash
206
+ wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_1.png
207
+ wget https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/controlnet_training/conditioning_image_2.png
208
+ ```
209
+
210
+ 根据GPU型号,可能需要启用特定优化。默认配置需要约38GB显存。若使用多GPU训练,请在 `accelerate launch` 命令中添加 `--multi_gpu` 参数。
211
+
212
+ <hfoptions id="gpu-select">
213
+ <hfoption id="16GB">
214
+
215
+ 16GB显卡可使用bitsandbytes 8-bit优化器和梯度检查点:
216
+
217
+ ```py
218
+ pip install bitsandbytes
219
+ ```
220
+
221
+ 训练命令添加以下参数:
222
+
223
+ ```bash
224
+ accelerate launch train_controlnet.py \
225
+ --gradient_checkpointing \
226
+ --use_8bit_adam \
227
+ ```
228
+
229
+ </hfoption>
230
+ <hfoption id="12GB">
231
+
232
+ 12GB显卡需组合使用bitsandbytes 8-bit优化器、梯度检查点、xFormers,并将梯度置为None而非0:
233
+
234
+ ```bash
235
+ accelerate launch train_controlnet.py \
236
+ --use_8bit_adam \
237
+ --gradient_checkpointing \
238
+ --enable_xformers_memory_efficient_attention \
239
+ --set_grads_to_none \
240
+ ```
241
+
242
+ </hfoption>
243
+ <hfoption id="8GB">
244
+
245
+ 8GB显卡需使用 [DeepSpeed](https://www.deepspeed.ai/) 将张量卸载到CPU或NVME:
246
+
247
+ 运行以下命令配置环境:
248
+
249
+ ```bash
250
+ accelerate config
251
+ ```
252
+
253
+ 选择DeepSpeed stage 2,结合fp16混合精度和参数卸载到CPU的方案。注意这会增加约25GB内存占用。配置示例如下:
254
+
255
+ ```bash
256
+ compute_environment: LOCAL_MACHINE
257
+ deepspeed_config:
258
+ gradient_accumulation_steps: 4
259
+ offload_optimizer_device: cpu
260
+ offload_param_device: cpu
261
+ zero3_init_flag: false
262
+ zero_stage: 2
263
+ distributed_type: DEEPSPEED
264
+ ```
265
+
266
+ 建议将优化器替换为DeepSpeed特化版 [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu),注意CUDA工具链版本需与PyTorch匹配。
267
+
268
+ 当前bitsandbytes与DeepSpeed存在兼容性问题。
269
+
270
+ 无需额外添加训练参数。
271
+
272
+ </hfoption>
273
+ </hfoptions>
274
+
275
+ <hfoptions id="training-inference">
276
+ <hfoption id="PyTorch">
277
+
278
+ ```bash
279
+ export MODEL_DIR="stable-diffusion-v1-5/stable-diffusion-v1-5"
280
+ export OUTPUT_DIR="path/to/save/model"
281
+
282
+ accelerate launch train_controlnet.py \
283
+ --pretrained_model_name_or_path=$MODEL_DIR \
284
+ --output_dir=$OUTPUT_DIR \
285
+ --dataset_name=fusing/fill50k \
286
+ --resolution=512 \
287
+ --learning_rate=1e-5 \
288
+ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
289
+ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
290
+ --train_batch_size=1 \
291
+ --gradient_accumulation_steps=4 \
292
+ --push_to_hub
293
+ ```
294
+
295
+ </hfoption>
296
+ <hfoption id="Flax">
297
+
298
+ Flax版本支持通过 `--profile_steps==5` 参数进行性能分析:
299
+
300
+ ```bash
301
+ pip install tensorflow tensorboard-plugin-profile
302
+ tensorboard --logdir runs/fill-circle-100steps-20230411_165612/
303
+ ```
304
+
305
+ 在 [http://localhost:6006/#profile](http://localhost:6006/#profile) 查看分析结果。
306
+
307
+ <Tip warning={true}>
308
+
309
+ 若遇到插件版本冲突,建议重新安装TensorFlow和Tensorboard。注意性能分析插件仍处实验阶段,部分视图可能不完整。`trace_viewer` 会截断超过1M的事件记录,在编译步骤分析时可能导致设备轨迹丢失。
310
+
311
+ </Tip>
312
+
313
+ ```bash
314
+ python3 train_controlnet_flax.py \
315
+ --pretrained_model_name_or_path=$MODEL_DIR \
316
+ --output_dir=$OUTPUT_DIR \
317
+ --dataset_name=fusing/fill50k \
318
+ --resolution=512 \
319
+ --learning_rate=1e-5 \
320
+ --validation_image "./conditioning_image_1.png" "./conditioning_image_2.png" \
321
+ --validation_prompt "red circle with blue background" "cyan circle with brown floral background" \
322
+ --validation_steps=1000 \
323
+ --train_batch_size=2 \
324
+ --revision="non-ema" \
325
+ --from_pt \
326
+ --report_to="wandb" \
327
+ --tracker_project_name=$HUB_MODEL_ID \
328
+ --num_train_epochs=11 \
329
+ --push_to_hub \
330
+ --hub_model_id=$HUB_MODEL_ID
331
+ ```
332
+
333
+ </hfoption>
334
+ </hfoptions>
335
+
336
+ 训练完成后即可进行推理:
337
+
338
+ ```py
339
+ from diffusers import StableDiffusionControlNetPipeline, ControlNetModel
340
+ from diffusers.utils import load_image
341
+ import torch
342
+
343
+ controlnet = ControlNetModel.from_pretrained("path/to/controlnet", torch_dtype=torch.float16)
344
+ pipeline = StableDiffusionControlNetPipeline.from_pretrained(
345
+ "path/to/base/model", controlnet=controlnet, torch_dtype=torch.float16
346
+ ).to("cuda")
347
+
348
+ control_image = load_image("./conditioning_image_1.png")
349
+ prompt = "pale golden rod circle with old lace background"
350
+
351
+ generator = torch.manual_seed(0)
352
+ image = pipeline(prompt, num_inference_steps=20, generator=generator, image=control_image).images[0]
353
+ image.save("./output.png")
354
+ ```
355
+
356
+ ## Stable Diffusion XL
357
+
358
+ Stable Diffusion XL (SDXL) 是新一代文生图模型,通过添加第二文本编码器支持生成更高分辨率图像。使用 [`train_controlnet_sdxl.py`](https://github.com/huggingface/diffusers/blob/main/examples/controlnet/train_controlnet_sdxl.py) 脚本可为SDXL训练ControlNet适配器。
359
+
360
+ SDXL训练脚本的详细解析请参阅 [SDXL训练](sdxl) 指南。
361
+
362
+ ## 后续步骤
363
+
364
+ 恭喜完成ControlNet训练!如需进一步了解模型应用,以下指南可能有所帮助:
365
+
366
+ - 学习如何 [使用ControlNet](../using-diffusers/controlnet) 进行多样化任务的推理
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/distributed_inference.md ADDED
@@ -0,0 +1,239 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--版权所有 2025 The HuggingFace Team。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版本(“许可证”)授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,根据许可证分发的软件按“原样”分发,不附带任何明示或暗示的担保或条件。请参阅许可证了解具体的语言管理权限和限制。
8
+ -->
9
+
10
+ # 分布式推理
11
+
12
+ 在分布式设置中,您可以使用 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 或 [PyTorch Distributed](https://pytorch.org/tutorials/beginner/dist_overview.html) 在多个 GPU 上运行推理,这对于并行生成多个提示非常有用。
13
+
14
+ 本指南将向您展示如何使用 🤗 Accelerate 和 PyTorch Distributed 进行分布式推理。
15
+
16
+ ## 🤗 Accelerate
17
+
18
+ 🤗 [Accelerate](https://huggingface.co/docs/accelerate/index) 是一个旨在简化在分布式设置中训练或运行推理的库。它简化了设置分布式环境的过程,让您可以专注于您的 PyTorch 代码。
19
+
20
+ 首先,创建一个 Python 文件并初始化一个 [`accelerate.PartialState`] 来创建分布式环境;您的设置会自动检测,因此您无需明确定义 `rank` 或 `world_size`。将 [`DiffusionPipeline`] 移动到 `distributed_state.device` 以为每个进程分配一个 GPU。
21
+
22
+ 现在使用 [`~accelerate.PartialState.split_between_processes`] 实用程序作为上下文管理器,自动在进程数之间分发提示。
23
+
24
+ ```py
25
+ import torch
26
+ from accelerate import PartialState
27
+ from diffusers import DiffusionPipeline
28
+
29
+ pipeline = DiffusionPipeline.from_pretrained(
30
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
31
+ )
32
+ distributed_state = PartialState()
33
+ pipeline.to(distributed_state.device)
34
+
35
+ with distributed_state.split_between_processes(["a dog", "a cat"]) as prompt:
36
+ result = pipeline(prompt).images[0]
37
+ result.save(f"result_{distributed_state.process_index}.png")
38
+ ```
39
+
40
+ 使用 `--num_processes` 参数指定要使用的 GPU 数量,并调用 `accelerate launch` 来运行脚本:
41
+
42
+ ```bash
43
+ accelerate launch run_distributed.py --num_processes=2
44
+ ```
45
+
46
+ <Tip>
47
+
48
+ 参考这个最小示例 [脚本](https://gist.github.com/sayakpaul/cfaebd221820d7b43fae638b4dfa01ba) 以在多个 GPU 上运行推理。要了解更多信息,请查看 [使用 🤗 Accelerate 进行分布式推理](https://huggingface.co/docs/accelerate/en/usage_guides/distributed_inference#distributed-inference-with-accelerate) 指南。
49
+
50
+ </Tip>
51
+
52
+ ## PyTorch Distributed
53
+
54
+
+ PyTorch 支持 [`DistributedDataParallel`](https://pytorch.org/docs/stable/generated/torch.nn.parallel.DistributedDataParallel.html),它启用了数据并行性。
55
+
56
+
57
+ 首先,创建一个 Python 文件并导入 `torch.distributed` 和 `torch.multiprocessing` 来设置分布式进程组,并为每个 GPU 上的推理生成进程。您还应该初始化一个 [`DiffusionPipeline`]:
58
+
59
+ ```py
60
+ import torch
61
+ import torch.distributed as dist
62
+ import torch.multiprocessing as mp
63
+
64
+ from diffusers import DiffusionPipeline
65
+
66
+ sd = DiffusionPipeline.from_pretrained(
67
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
68
+ )
69
+ ```
70
+
71
+ 您需要创建一个函数来运行推理;[`init_process_group`](https://pytorch.org/docs/stable/distributed.html?highlight=init_process_group#torch.distributed.init_process_group) 处理创建一个分布式环境,指定要使用的后端类型、当前进程的 `rank` 以及参与进程的数量 `world_size`。如果您在 2 个 GPU 上并行运行推理,那么 `world_size` 就是 2。
72
+
73
+ 将 [`DiffusionPipeline`] 移动到 `rank`,并使用 `get_rank` 为每个进程分配一个 GPU,其中每个进程处理不同的提示:
74
+
75
+ ```py
76
+ def run_inference(rank, world_size):
77
+ dist.init_process_group("nccl", rank=rank, world_size=world_size)
78
+
79
+ sd.to(rank)
80
+
81
+ if torch.distributed.get_rank() == 0:
82
+ prompt = "a dog"
83
+ elif torch.distributed.get_rank() == 1:
84
+ prompt = "a cat"
85
+
86
+ image = sd(prompt).images[0]
87
+ image.save(f"./{'_'.join(prompt)}.png")
88
+ ```
89
+
90
+ 要运行分布式推理,调用 [`mp.spawn`](https://pytorch.org/docs/stable/multiprocessing.html#torch.multiprocessing.spawn) 在 `world_size` 定义的 GPU 数量上运行 `run_inference` 函数:
91
+
92
+ ```py
93
+ def main():
94
+ world_size = 2
95
+ mp.spawn(run_inference, args=(world_size,), nprocs=world_size, join=True)
96
+
97
+
98
+ if __name__ == "__main__":
99
+ main()
100
+ ```
101
+
102
+ 完成推理脚本后,使用 `--nproc_per_node` 参数指定要使用的 GPU 数量,并调用 `torchrun` 来运行脚本:
103
+
104
+ ```bash
105
+ torchrun run_distributed.py --nproc_per_node=2
106
+ ```
107
+
108
+ > [!TIP]
109
+ > 您可以在 [`DiffusionPipeline`] 中使用 `device_map` 将其模型级组件分布在多个设备上。请参考 [设备放置](../tutorials/inference_with_big_models#device-placement) 指南了解更多信息。
110
+
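+ 下面是一个简要示意,演示提示中提到的 `device_map` 用法:加载管道时传入 `"balanced"` 策略,让各个模型级组件自动分配到可用 GPU 上(模型名称仅作示例,实际分布取决于您的硬件):
+
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline
+
+ # 以 "balanced" 策略加载管道,将文本编码器、UNet/变换器、VAE 等组件分布到可用 GPU 上
+ pipeline = DiffusionPipeline.from_pretrained(
+     "stable-diffusion-v1-5/stable-diffusion-v1-5",
+     torch_dtype=torch.float16,
+     device_map="balanced",
+ )
+ print(pipeline.hf_device_map)  # 查看各组件到设备的映射
+ ```
+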
111
+ ## 模型分片
112
+
113
+ 现代扩散系统,如 [Flux](../api/pipelines/flux),非常大且包含多个模型。例如,[Flux.1-Dev](https://hf.co/black-forest-labs/FLUX.1-dev) 由两个文本编码器 - [T5-XXL](https://hf.co/google/t5-v1_1-xxl) 和 [CLIP-L](https://hf.co/openai/clip-vit-large-patch14) - 一个 [扩散变换器](../api/models/flux_transformer),以及一个 [VAE](../api/models/autoencoderkl) 组成。对于如此大的模型,在消费级 GPU 上运行推理可能具有挑战性。
114
+
115
+ 模型分片是一种技术,当模型无法容纳在单个 GPU 上时,将模型分布在多个 GPU 上。下面的示例假设有两个 16GB GPU 可用于推理。
116
+
117
+ 开始使用文本编码器计算文本嵌入。通过设置 `device_map="balanced"` 将文本编码器保持在两个GPU上。`balanced` 策略将模型均匀分布在所有可用GPU上。使用 `max_memory` 参数为每个GPU上的每个文本编码器分配最大内存量。
118
+
119
+ > [!TIP]
120
+ > **仅** 在此步骤加载文本编码器!扩散变换器和VAE在后续步骤中加载以节省内存。
121
+
122
+ ```py
123
+ from diffusers import FluxPipeline
124
+ import torch
125
+
126
+ prompt = "a photo of a dog with cat-like look"
127
+
128
+ pipeline = FluxPipeline.from_pretrained(
129
+ "black-forest-labs/FLUX.1-dev",
130
+ transformer=None,
131
+ vae=None,
132
+ device_map="balanced",
133
+ max_memory={0: "16GB", 1: "16GB"},
134
+ torch_dtype=torch.bfloat16
135
+ )
136
+ with torch.no_grad():
137
+ print("Encoding prompts.")
138
+ prompt_embeds, pooled_prompt_embeds, text_ids = pipeline.encode_prompt(
139
+ prompt=prompt, prompt_2=None, max_sequence_length=512
140
+ )
141
+ ```
142
+
143
+ 一旦文本嵌入计算完成,从GPU中移除它们以为扩散变换器腾出空间。
144
+
145
+ ```py
146
+ import gc
147
+
148
+ def flush():
149
+ gc.collect()
150
+ torch.cuda.empty_cache()
151
+ torch.cuda.reset_max_memory_allocated()
152
+ torch.cuda.reset_peak_memory_stats()
153
+
154
+ del pipeline.text_encoder
155
+ del pipeline.text_encoder_2
156
+ del pipeline.tokenizer
157
+ del pipeline.tokenizer_2
158
+ del pipeline
159
+
160
+ flush()
161
+ ```
162
+
163
+ 接下来加载扩散变换器,它有125亿参数。这次,设置 `device_map="auto"` 以自动将模型分布在两个16GB GPU上。`auto` 策略由 [Accelerate](https://hf.co/docs/accelerate/index) 支持,并作为 [大模型推理](https://hf.co/docs/accelerate/concept_guides/big_model_inference) 功能的一部分可用。它首先将模型分布在最快的设备(GPU)上,然后在需要时移动到较慢的设备如CPU和硬盘。将模型参数存储在较慢设备上的权衡是推理延迟较慢。
164
+
165
+ ```py
166
+ from diffusers import AutoModel
167
+ import torch
168
+
169
+ transformer = AutoModel.from_pretrained(
170
+ "black-forest-labs/FLUX.1-dev",
171
+ subfolder="transformer",
172
+ device_map="auto",
173
+ torch_dtype=torch.bfloat16
174
+ )
175
+ ```
176
+
177
+ > [!TIP]
178
+ > 在任何时候,您可以尝试 `print(pipeline.hf_device_map)` 来查看各种模型如何在设备上分布。这对于跟踪模型的设备放置很有用。您也可以尝试 `print(transformer.hf_device_map)` 来查看变换器模型如何在设备上分片。
179
+
180
+ 将变换器模型添加到管道中以进行去噪,但将其他模型级组件如文本编码器和VAE设置为 `None`,因为您还不需要它们。
181
+
182
+ ```py
183
+ pipeline = FluxPipeline.from_pretrained(
184
+ "black-forest-labs/FLUX.1-dev",
185
+ text_encoder=None,
186
+ text_encoder_2=None,
187
+ tokenizer=None,
188
+ tokenizer_2=None,
189
+ vae=None,
190
+ transformer=transformer,
191
+ torch_dtype=torch.bfloat16
192
+ )
193
+
194
+ print("Running denoising.")
195
+ height, width = 768, 1360
196
+ latents = pipeline(
+ prompt_embeds=prompt_embeds,
200
+ pooled_prompt_embeds=pooled_prompt_embeds,
201
+ num_inference_steps=50,
202
+ guidance_scale=3.5,
203
+ height=height,
204
+ width=width,
205
+ output_type="latent",
206
+ ).images
207
+ ```
208
+
209
+ 从内存中移除管道和变换器,因为它们不再需要。
210
+
211
+ ```py
212
+ del pipeline.transformer
213
+ del pipeline
214
+
215
+ flush()
216
+ ```
217
+
218
+ 最后,使用变分自编码器(VAE)将潜在表示解码为图像。VAE通常足够小,可以在单个GPU上加载。
219
+
220
+ ```py
221
+ from diffusers import AutoencoderKL
222
+ from diffusers.image_processor import VaeImageProcessor
223
+ import torch
224
+
225
+ vae = AutoencoderKL.from_pretrained("black-forest-labs/FLUX.1-dev", subfolder="vae", torch_dtype=torch.bfloat16).to("cuda")
226
+ vae_scale_factor = 2 ** (len(vae.config.block_out_channels))
227
+ image_processor = VaeImageProcessor(vae_scale_factor=vae_scale_factor)
228
+
229
+ with torch.no_grad():
230
+ print("运行解码中。")
231
+ latents = FluxPipeline._unpack_latents(latents, height, width, vae_scale_factor)
232
+ latents = (latents / vae.config.scaling_factor) + vae.config.shift_factor
233
+
234
+ image = vae.decode(latents, return_dict=False)[0]
235
+ image = image_processor.postprocess(image, output_type="pil")
236
+ image[0].save("split_transformer.png")
237
+ ```
238
+
239
+ 通过选择性加载和卸载在特定阶段所需的模型,并将最大模型分片到多个GPU上,可以在消费级GPU上运行大型模型的推理。
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/dreambooth.md ADDED
@@ -0,0 +1,643 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--版权所有 2025 The HuggingFace Team。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版(“许可证”)授权;除非遵守许可证,否则不得使用此文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,否则根据许可证分发的软件按“原样”分发,不附带任何明示或暗示的担保或条件。请参阅许可证以了解特定的语言管理权限和限制。
8
+ -->
9
+
10
+ # DreamBooth
11
+
12
+ [DreamBooth](https://huggingface.co/papers/2208.12242) 是一种训练技术,通过仅训练少数主题或风格的图像来更新整个扩散模型。它通过在提示中关联一个特殊词与示例图像来工作。
13
+
14
+ 如果您在 vRAM 有限的 GPU 上训练,应尝试在训练命令中启用 `gradient_checkpointing` 和 `mixed_precision` 参数。您还可以通过使用 [xFormers](../optimization/xformers) 的内存高效注意力来减少内存占用。JAX/Flax 训练也支持在 TPU 和 GPU 上进行高效训练,但不支持梯度检查点或 xFormers。如果您想使用 Flax 更快地训练,应拥有内存 >30GB 的 GPU。
15
+
16
+ 本指南将探索 [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) 脚本,帮助您更熟悉它,以及如何根据您的用例进行适配。
17
+
18
+ 在运行脚本之前,请确保从源代码安装库:
19
+
20
+ ```bash
21
+ git clone https://github.com/huggingface/diffusers
22
+ cd diffusers
23
+ pip install .
24
+ ```
25
+
26
+ 导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项:
27
+
28
+ <hfoptions id="installation">
29
+ <hfoption id="PyTorch">
30
+
31
+ ```bash
32
+ cd examples/dreambooth
33
+ pip install -r requirements.txt
34
+ ```
35
+
36
+ </hfoption>
37
+ <hfoption id="Flax">
38
+
39
+ ```bash
40
+ cd examples/dreambooth
41
+ pip install -r requirements_flax.txt
42
+ ```
43
+
44
+ </hfoption>
45
+ </hfoptions>
46
+
47
+ <Tip>
48
+
49
+ 🤗 Accelerate 是一个库,用于帮助您在多个 GPU/TPU 上或使用混合精度进行训练。它会根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate [快速入门](https://huggingface.co/docs/accelerate/quicktour) 以了解更多信息。
50
+
51
+ </Tip>
52
+
53
+ 初始化 🤗 Accelerate 环境:
54
+
55
+ ```bash
56
+ accelerate config
57
+ ```
58
+
59
+ 要设置默认的 🤗 Accelerate 环境而不选择任何配置:
60
+
61
+ ```bash
62
+ accelerate config default
63
+ ```
64
+
65
+ 或者,如果您的环境不支持交互式 shell,例如笔记本,您可以使用:
66
+
67
+ ```py
68
+ from accelerate.utils import write_basic_config
69
+
70
+ write_basic_config()
71
+ ```
72
+
73
+ 最后,如果您想在自己的数据集上训练模型,请查看 [创建用于训练的数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。
75
+
76
+ <Tip>
77
+
78
+ 以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未详细涵盖脚本的每个方面。如果您有兴趣了解更多,请随时阅读[脚本](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py),并告诉我们如果您有任何问题或疑虑。
79
+
80
+ </Tip>
81
+
82
+ ## 脚本参数
83
+
84
+ <Tip warning={true}>
85
+
86
+ DreamBooth 对训练超参数非常敏感,容易过拟合。阅读 [使用 🧨 Diffusers 训练 Stable Diffusion 与 Dreambooth](https://huggingface.co/blog/dreambooth) 博客文章,了解针对不同主题的推荐设置,以帮助您选择合适的超参数。
87
+
88
+ </Tip>
89
+
90
+ 训练脚本提供了许多参数来自定义您的训练运行。所有参数及其描述都可以在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L228) 函数中找到。参数设置了默认值,这些默认值应该开箱即用效果不错,但如果您愿意,也可以在训练命令中设置自己的值。
91
+
92
+ 例如,要以 bf16 格式进行训练:
93
+
94
+ ```bash
95
+ accelerate launch train_dreambooth.py \
96
+ --mixed_precision="bf16"
97
+ ```
98
+
99
+ 一些基本且重要的参数需要了解和指定:
100
+
101
+ - `--pretrained_model_name_or_path`: Hub 上的模型名称或预训练模型的本地路径
102
+ - `--instance_data_dir`: 包含训练数据集(示例图像)的文件夹路径
103
+ - `--instance_prompt`: 包含示例图像特殊单词的文本提示
104
+ - `--train_text_encoder`: 是否也训练文本编码器
105
+ - `--output_dir`: 保存训练后模型的位置
106
+ - `--push_to_hub`: 是否将训练后的模型推送到 Hub
107
+ - `--checkpointing_steps`: 模型训练时保存检查点的频率;这在训练因某种原因中断时很有用,您可以通过在训练命令中添加 `--resume_from_checkpoint` 来从该检查点继续训练
108
+
109
+ ### Min-SNR 加权
110
+
111
+ [Min-SNR](https://huggingface.co/papers/2303.09556) 加权策略可以通过重新平衡损失来帮助训练,以实现更快的收敛。训练脚本支持预测 `epsilon`(噪声)或 `v_prediction`,但 Min-SNR 与两种预测类型都兼容。此加权策略仅由 PyTorch 支持,在 Flax 训练脚本中不可用。
112
+
113
+ 添加 `--snr_gamma` 参数并将其设置为推荐值 5.0:
114
+
115
+ ```bash
116
+ accelerate launch train_dreambooth.py \
117
+ --snr_gamma=5.0
118
+ ```
119
+
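+ 作为补充,下面是一个简化示意(非训练脚本原文),展示 Min-SNR 加权在损失计算中的大致做法;函数签名与变量名为假设,并且假定模型的预测类型为 `epsilon`:
+
+ ```py
+ import torch
+ import torch.nn.functional as F
+
+ def min_snr_weighted_loss(model_pred, target, timesteps, noise_scheduler, snr_gamma=5.0):
+     """epsilon 预测下按 min(SNR, γ)/SNR 重新加权的 MSE 损失(简化示意)。"""
+     alphas_cumprod = noise_scheduler.alphas_cumprod.to(timesteps.device)
+     alpha_prod = alphas_cumprod[timesteps]
+     snr = alpha_prod / (1 - alpha_prod)                  # 每个时间步的信噪比
+     weights = torch.clamp(snr, max=snr_gamma) / snr      # min(SNR, γ) / SNR
+     loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
+     loss = loss.mean(dim=list(range(1, len(loss.shape)))) * weights
+     return loss.mean()
+ ```
+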
120
+ ### 先验保持损失
121
+
122
+ 先验保持损失是一种使用模型自身生成的样本来帮助它学习生成更多样化图像的方法。由于这些生成的样本图像与您提供的图像属于同一类别,它们帮助模型保留已经学到的关于该类别的知识,以及如何利用已掌握的类别信息来创建新的组合。
124
+
125
+ - `--with_prior_preservation`: 是否使用先验保留损失
126
+ - `--prior_loss_weight`: 控制先验保留损失对模型的影响程度
127
+ - `--class_data_dir`: 包含生成的类别样本图像的文件夹路径
128
+ - `--class_prompt`: 描述生成的样本图像类别的文本提示
129
+
130
+ ```bash
131
+ accelerate launch train_dreambooth.py \
132
+ --with_prior_preservation \
133
+ --prior_loss_weight=1.0 \
134
+ --class_data_dir="path/to/class/images" \
135
+ --class_prompt="text prompt describing class"
136
+ ```
137
+
138
+ ### 训练文本编码器
139
+
140
+ 为了提高生成输出的质量,除了 UNet 之外,您还可以训练文本编码器。这需要额外的内存,并且您需要一个至少有 24GB 显存的 GPU。如果您拥有必要的硬件,那么训练文本编码器会产生更好的结果,尤其是在生成面部图像时。通过以下方式启用此选项:
141
+
142
+ ```bash
143
+ accelerate launch train_dreambooth.py \
144
+ --train_text_encoder
145
+ ```
146
+
147
+ ## 训练脚本
148
+
149
+ DreamBooth 附带了自己的数据集类:
150
+
151
+ - [`DreamBoothDataset`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L604): 预处理图像和类别图像,并对提示进行分词以用于训练
152
+ - [`PromptDataset`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L738): 生成提示嵌入以生成类别图像
153
+
154
+ 如果您启用了[先验保留损失](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L842),类别图像在此处生成:
155
+
156
+ ```py
157
+ sample_dataset = PromptDataset(args.class_prompt, num_new_images)
158
+ sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
159
+
160
+ sample_dataloader = accelerator.prepare(sample_dataloader)
161
+ pipeline.to(accelerator.device)
162
+
163
+ for example in tqdm(
164
+ sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
165
+ ):
166
+ images = pipeline(example["prompt"]).images
167
+ ```
168
+
169
+ 接下来是 [`main()`](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L799) 函数,它处理设置训练数据集和训练循环本身。脚本加载 [tokenizer](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L898)、[scheduler 和 models](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L912C1-L912C1):
170
+
171
+ ```py
172
+ # Load the tokenizer
173
+ if args.tokenizer_name:
174
+ tokenizer = AutoTokenizer.from_pretrained(args.tokenizer_name, revision=args.revision, use_fast=False)
175
+ elif args.pretrained_model_name_or_path:
176
+ tokenizer = AutoTokenizer.from_pretrained(
177
+ args.pretrained_model_name_or_path,
178
+ subfolder="tokenizer",
179
+ revision=args.revision,
180
+ use_fast=False,
181
+ )
182
+
183
+ # 加载调度器和模型
184
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
185
+ text_encoder = text_encoder_cls.from_pretrained(
186
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
187
+ )
188
+
189
+ if model_has_vae(args):
190
+ vae = AutoencoderKL.from_pretrained(
191
+ args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision
192
+ )
193
+ else:
194
+ vae = None
195
+
196
+ unet = UNet2DConditionModel.from_pretrained(
197
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
198
+ )
199
+ ```
200
+
201
+ 然后,是时候[创建训练数据集](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L1073)和从`DreamBoothDataset`创建DataLoader:
202
+
203
+ ```py
204
+ train_dataset = DreamBoothDataset(
205
+ instance_data_root=args.instance_data_dir,
206
+ instance_prompt=args.instance_prompt,
207
+ class_data_root=args.class_data_dir if args.with_prior_preservation else None,
208
+ class_prompt=args.class_prompt,
209
+ class_num=args.num_class_images,
210
+ tokenizer=tokenizer,
211
+ size=args.resolution,
212
+ center_crop=args.center_crop,
213
+ encoder_hidden_states=pre_computed_encoder_hidden_states,
214
+ class_prompt_encoder_hidden_states=pre_computed_class_prompt_encoder_hidden_states,
215
+ tokenizer_max_length=args.tokenizer_max_length,
216
+ )
217
+
218
+ train_dataloader = torch.utils.data.DataLoader(
219
+ train_dataset,
220
+ batch_size=args.train_batch_size,
221
+ shuffle=True,
222
+ collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
223
+ num_workers=args.dataloader_num_workers,
224
+ )
225
+ ```
226
+
227
+ 最后,[训练循环](https://github.com/huggingface/diffusers/blob/072e00897a7cf4302c347a63ec917b4b8add16d4/examples/dreambooth/train_dreambooth.py#L1151)处理剩余步骤,例如将图像转换为潜在空间、向输入添加噪声、预测噪声残差和计算损失。
228
+
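+ 下面是一个简化示意(非脚本原文),概括单个训练步骤的基本模式;假设 `vae`、`unet`、`noise_scheduler`、`encoder_hidden_states` 和 `batch` 已按脚本方式准备好,且预测目标为噪声(`epsilon`):
+
+ ```py
+ import torch
+ import torch.nn.functional as F
+
+ # 1. 将图像编码到潜在空间
+ latents = vae.encode(batch["pixel_values"]).latent_dist.sample()
+ latents = latents * vae.config.scaling_factor
+
+ # 2. 采样噪声和时间步,并向潜在表示添加噪声
+ noise = torch.randn_like(latents)
+ timesteps = torch.randint(
+     0, noise_scheduler.config.num_train_timesteps, (latents.shape[0],), device=latents.device
+ )
+ noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)
+
+ # 3. 预测噪声残差并计算损失
+ model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample
+ loss = F.mse_loss(model_pred.float(), noise.float(), reduction="mean")
+ ```
+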
229
+ 如果您想了解更多关于训练循环的工作原理,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。
230
+
231
+ ## 启动脚本
232
+
233
+ 您现在准备好启动训练脚本了!🚀
234
+
235
+ 对于本指南,您将下载一些[狗的图片](https://huggingface.co/datasets/diffusers/dog-example)的图像并将它们存储在一个目录中。但请记住,您可以根据需要创建和使用自己的数据集(请参阅[创建用于训练的数据集](create_dataset)指南)。
236
+
237
+ ```py
238
+ from huggingface_hub import snapshot_download
239
+
240
+ local_dir = "./dog"
241
+ snapshot_download(
242
+ "diffusers/dog-example",
243
+ local_dir=local_dir,
244
+ repo_type="dataset",
245
+ ignore_patterns=".gitattributes",
246
+ )
247
+ ```
248
+
249
+ 设置环境变量 `MODEL_NAME` 为 Hub 上的模型 ID 或本地模型路径,`INSTANCE_DIR` 为您刚刚下载狗图像的路径,`OUTPUT_DIR` 为您想保存模型的位置。您将使用 `sks` 作为特殊词来绑定训练。
250
+
251
+ 如果您有兴趣跟随训练过程,可以定期保存生成的图像作为训练进度。将以下参数添加到训练命令中:
252
+
253
+ ```bash
254
+ --validation_prompt="a photo of a sks dog"
255
+ --num_validation_images=4
256
+ --validation_steps=100
257
+ ```
258
+
259
+ 在启动脚本之前,还有一件事!根据您拥有的 GPU,您可能需要启用某些优化来训练 DreamBooth。
260
+
261
+ <hfoptions id="gpu-select">
262
+ <hfoption id="16GB">
263
+
264
+ 在 16GB GPU 上,您可以使用 bitsandbytes 8 位优化器和梯度检查点来帮助训练 DreamBooth 模型。安装 bitsandbytes:
265
+
266
+ ```bash
267
+ pip install bitsandbytes
268
+ ```
269
+
270
+ 然后,将以下参数添加到您的训练命令中:
271
+
272
+ ```bash
273
+ accelerate launch train_dreambooth.py \
274
+ --gradient_checkpointing \
275
+ --use_8bit_adam \
276
+ ```
277
+
278
+ </hfoption>
279
+ <hfoption id="12GB">
280
+
281
+ 在 12GB GPU 上,您需要 bitsandbytes 8 位优化器、梯度检查点、xFormers,并将梯度设置为 `None` 而不是零以减少内存使用。
282
+
283
+ ```bash
284
+ accelerate launch train_dreambooth.py \
285
+ --use_8bit_adam \
286
+ --gradient_checkpointing \
287
+ --enable_xformers_memory_efficient_attention \
288
+ --set_grads_to_none \
289
+ ```
290
+
291
+ </hfoption>
292
+ <hfoption id="8GB">
293
+
294
+ 在 8GB GPU 上,您需要 [DeepSpeed](https://www.deepspeed.ai/) 将一些张量从 vRAM 卸载到 CPU 或 NVME,以便在更少的 GPU 内存下进行训练。
295
+
296
+ 运行以下命令来配置您的 🤗 Accelerate 环境:
297
+
298
+ ```bash
299
+ accelerate config
300
+ ```
301
+
302
+ 在配置过程中,确认您想使用 DeepSpeed。现在,通过结合 DeepSpeed 阶段 2、fp16 混合精度以及将模型参数和优化器状态卸载到 CPU,应该可以在低于 8GB vRAM 的情况下进行训练。缺点是这需要更多的系统 RAM(约 25 GB)。有关更多配置选项,请参阅 [DeepSpeed 文档](https://huggingface.co/docs/accelerate/usage_guides/deepspeed)。
303
+
304
+ 您还应将默认的 Adam 优化器更改为 DeepSpeed 的优化版本 [`deepspeed.ops.adam.DeepSpeedCPUAdam`](https://deepspeed.readthedocs.io/en/latest/optimizers.html#adam-cpu) 以获得显著的速度提升。启用 `DeepSpeedCPUAdam` 要求您的系统 CUDA 工具链版本与 PyTorch 安装的版本相同。
305
+
306
+ 目前,bitsandbytes 8 位优化器似乎与 DeepSpeed 不兼容。
307
+
308
+ 就是这样!您不需要向训练命令添加任何额外参数。
309
+
310
+ </hfoption>
311
+ </hfoptions>
312
+
313
+ <hfoptions id="training-inference">
314
+ <hfoption id="PyTorch">
315
+
316
+ ```bash
317
+ export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
318
+ export INSTANCE_DIR="./dog"
319
+ export OUTPUT_DIR="path_to_saved_model"
321
+
322
+ accelerate launch train_dreambooth.py \
323
+ --pretrained_model_name_or_path=$MODEL_NAME \
324
+ --instance_data_dir=$INSTANCE_DIR \
325
+ --output_dir=$OUTPUT_DIR \
326
+ --instance_prompt="a photo of sks dog" \
327
+ --resolution=512 \
328
+ --train_batch_size=1 \
329
+ --gradient_accumulation_steps=1 \
330
+ --learning_rate=5e-6 \
331
+ --lr_scheduler="constant" \
332
+ --lr_warmup_steps=0 \
333
+ --max_train_steps=400 \
334
+ --push_to_hub
335
+ ```
336
+
337
+ </hfoption>
338
+ <hfoption id="Flax">
339
+
340
+ ```bash
341
+ export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
342
+ export INSTANCE_DIR="./dog"
343
+ export OUTPUT_DIR="path-to-save-model"
344
+
345
+ python train_dreambooth_flax.py \
346
+ --pretrained_model_name_or_path=$MODEL_NAME \
347
+ --instance_data_dir=$INSTANCE_DIR \
348
+ --output_dir=$OUTPUT_DIR \
349
+ --instance_prompt="a photo of sks dog" \
350
+ --resolution=512 \
351
+ --train_batch_size=1 \
352
+ --learning_rate=5e-6 \
353
+ --max_train_steps=400 \
354
+ --push_to_hub
355
+ ```
356
+
357
+ </hfoption>
358
+ </hfoptions>
359
+
360
+ 训练完成后,您可以使用新训练的模型进行推理!
361
+
362
+ <Tip>
363
+
364
+ 等不及在训练完成前就尝试您的模型进行推理?🤭 请确保安装了最新版本的 🤗 Accelerate。
365
+
366
+ ```py
367
+ from diffusers import DiffusionPipeline, UNet2DConditionModel
368
+ from transformers import CLIPTextModel
369
+ import torch
370
+
371
+ unet = UNet2DConditionModel.from_pretrained("path/to/model/checkpoint-100/unet")
372
+
373
+ # 如果您使用了 `--args.train_text_encoder` 进行训练,请确保也加载文本编码器
374
+ text_encoder = CLIPTextModel.from_pretrained("path/to/model/checkpoint-100/text_encoder")
375
+
376
+ pipeline = DiffusionPipeline.from_pretrained(
377
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", unet=unet, text_encoder=text_encoder, dtype=torch.float16,
378
+ ).to("cuda")
379
+
380
+ image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
381
+ image.save("dog-bucket.png")
382
+ ```
383
+
384
+ </Tip>
385
+
386
+ <hfoptions id="training-inference">
387
+ <hfoption id="PyTorch">
388
+
389
+ ```py
390
+ from diffusers import DiffusionPipeline
391
+ import torch
392
+
393
+ pipeline = DiffusionPipeline.from_pretrained("path_to_saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda")
394
+ image = pipeline("A photo of sks dog in a bucket", num_inference_steps=50, guidance_scale=7.5).images[0]
395
+ image.save("dog-bucket.png")
396
+ ```
397
+
398
+ </hfoption>
399
+ <hfoption id="Flax">
400
+
401
+ ```py
402
+ import jax
403
+ import numpy as np
404
+ from flax.jax_utils import replicate
405
+ from flax.training.common_utils import shard
406
+ from diffusers import FlaxStableDiffusionPipeline
407
+
408
+ pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path-to-your-trained-model", dtype=jax.numpy.bfloat16)
409
+
410
+ prompt = "A photo of sks dog in a bucket"
411
+ prng_seed = jax.random.PRNGKey(0)
412
+ num_inference_steps = 50
413
+
414
+ num_samples = jax.device_count()
415
+ prompt = num_samples * [prompt]
416
+ prompt_ids = pipeline.prepare_inputs(prompt)
417
+
418
+ # 分片输入和随机数生成器
419
+ params = replicate(params)
420
+ prng_seed = jax.random.split(prng_seed, jax.device_count())
421
+ prompt_ids = shard(prompt_ids)
422
+
423
+ images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
+ images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
+ images[0].save("dog-bucket.png")
427
+ ```
428
+
429
+ </hfoption>
430
+ </hfoptions>
431
+
432
+ ## LoRA
433
+
434
+ LoRA 是一种训练技术,可显著减少可训练参数的数量。因此,训练速度更快,并且更容易存储生成的权重,因为它们小得多(约 100MB)。使用 [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) 脚本通过 LoRA 进行训练。
435
+
436
+ LoRA 训练脚本在 [LoRA 训练](lora) 指南中有更详细的讨论。
437
+
438
+ ## Stable Diffusion XL
439
+
440
+ Stable Diffusion XL (SDXL) 是一个强大的文本到图像模型,可生成高分辨率图像,并在其架构中添加了第二个文本编码器。使用 [train_dreambooth_lora_sdxl.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora_sdxl.py) 脚本通过 LoRA 训练 SDXL 模型。
441
+
442
+ SDXL 训练脚本在 [SDXL 训练](sdxl) 指南中有更详细的讨论。
443
+
444
+ ## DeepFloyd IF
445
+
446
+ DeepFloyd IF 是一个级联像素扩散模型,包含三个阶段。第一阶段生成基础图像,第二和第三阶段逐步将基础图像放大为高分辨率 1024x1024 图像。使用 [train_dreambooth_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py) 或 [train_dreambooth.py](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth.py) 脚本通过 LoRA 或完整模型训练 DeepFloyd IF 模型。
447
+
448
+ DeepFloyd IF 使用预测方差,但 Diffusers 训练脚本使用预测误差,因此训练的 DeepFloyd IF 模型被切换到固定方差调度。训练脚本将为您更新完全训练模型的调度器配置。但是,当您加载保存的 LoRA 权重时,还必须更新管道的调度器配置。
449
+
450
+ ```py
451
+ from diffusers import DiffusionPipeline
452
+
453
+ pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", use_safetensors=True)
454
+
455
+ pipe.load_lora_weights("<lora weights path>")
456
+
457
+ # 更新调度器配置为固定方差调度
458
+ pipe.scheduler = pipe.scheduler.__class__.from_config(pipe.scheduler.config, variance_type="fixed_small")
459
+ ```
460
+
461
+ 第二阶段模型需要额外的验证图像进行放大。您可以下载并使用训练图像的缩小版本。
462
+
463
+ ```py
464
+ from huggingface_hub import snapshot_download
465
+
466
+ local_dir = "./dog_downsized"
467
+ snapshot_download(
468
+ "diffusers/dog-example-downsized",
469
+ local_dir=local_dir,
470
+ repo_type="dataset",
471
+ ignore_patterns=".gitattributes",
472
+ )
473
+ ```
474
+
475
+ 以下代码示例简要概述了如何结合 DreamBooth 和 LoRA 训练 DeepFloyd IF 模型。一些需要注意的重要参数包括:
476
+
477
+ * `--resolution=64`,需要更小的分辨率,因为 DeepFloyd IF 是像素扩散模型,直接处理未压缩的像素,因此输入图像必须更小
479
+ * `--pre_compute_text_embeddings`,提前计算文本嵌入以节省内存,因为 [`~transformers.T5Model`] 可能占用大量内存
480
+ * `--tokenizer_max_length=77`,您可以使用更长的默认文本长度与 T5 作为文本编码器,但默认模型编码过程使用较短的文本长度
481
+ * `--text_encoder_use_attention_mask`,将注意力掩码传递给文本编码器
482
+
483
+ <hfoptions id="IF-DreamBooth">
484
+ <hfoption id="Stage 1 LoRA DreamBooth">
485
+
486
+ 使用 LoRA 和 DreamBooth 训练 DeepFloyd IF 的第 1 阶段需要约 28GB 内存。
487
+
488
+ ```bash
489
+ export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
490
+ export INSTANCE_DIR="dog"
491
+ export OUTPUT_DIR="dreambooth_dog_lora"
492
+
493
+ accelerate launch train_dreambooth_lora.py \
494
+ --report_to wandb \
495
+ --pretrained_model_name_or_path=$MODEL_NAME \
496
+ --instance_data_dir=$INSTANCE_DIR \
497
+ --output_dir=$OUTPUT_DIR \
498
+ --instance_prompt="a sks dog" \
499
+ --resolution=64 \
500
+ --train_batch_size=4 \
501
+ --gradient_accumulation_steps=1 \
502
+ --learning_rate=5e-6 \
503
+ --scale_lr \
504
+ --max_train_steps=1200 \
505
+ --validation_prompt="a sks dog" \
506
+ --validation_epochs=25 \
507
+ --checkpointing_steps=100 \
508
+ --pre_compute_text_embeddings \
509
+ --tokenizer_max_length=77 \
510
+ --text_encoder_use_attention_mask
511
+ ```
512
+
513
+ </hfoption>
514
+ <hfoption id="Stage 2 LoRA DreamBooth">
515
+
516
+ 对于使用 LoRA 和 DreamBooth 的 DeepFloyd IF 第 2 阶段,请注意这些参数:
517
+
518
+ * `--validation_images`,验证期间用于上采样的图像
519
+ * `--class_labels_conditioning=timesteps`,按照第 2 阶段的要求对 UNet 进行额外的条件化
520
+ * `--learning_rate=1e-6`,与第 1 阶段相比使用较低的学习率
521
+ * `--resolution=256`,上采样器的预期分辨率
522
+
523
+ ```bash
524
+ export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
525
+ export INSTANCE_DIR="dog"
526
+ export OUTPUT_DIR="dreambooth_dog_upscale"
527
+ export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
528
+
529
+ python train_dreambooth_lora.py \
530
+ --report_to wandb \
531
+ --pretrained_model_name_or_path=$MODEL_NAME \
532
+ --instance_data_dir=$INSTANCE_DIR \
533
+ --output_dir=$OUTPUT_DIR \
534
+ --instance_prompt="a sks dog" \
535
+ --resolution=256 \
536
+ --train_batch_size=4 \
537
+ --gradient_accumulation_steps=1 \
538
+ --learning_rate=1e-6 \
539
+ --max_train_steps=2000 \
540
+ --validation_prompt="a sks dog" \
541
+ --validation_epochs=100 \
542
+ --checkpointing_steps=500 \
543
+ --pre_compute_text_embeddings \
544
+ --tokenizer_max_length=77 \
545
+ --text_encoder_use_attention_mask \
546
+ --validation_images $VALIDATION_IMAGES \
547
+ --class_labels_conditioning=timesteps
548
+ ```
549
+
550
+ </hfoption>
551
+ <hfoption id="Stage 1 DreamBooth">
552
+
553
+ 对于使用 DreamBooth 的 DeepFloyd IF 第 1 阶段,请注意这些参数:
554
+
555
+ * `--skip_save_text_encoder`,跳过将完整的 T5 文本编码器与微调后的模型一起保存
+ * `--use_8bit_adam`,使用 8 位 Adam 优化器以节省内存,因为训练完整模型时优化器状态会占用大量内存
559
+ * `--learning_rate=1e-7`,对于完整模型训练应使用非常低的学习率,否则模型质量会下降(您可以使用更高的学习率和更大的批次大小)
560
+
561
+ 使用8位Adam和批次大小为4进行训练,完整模型可以在约48GB内存下训练。
562
+
563
+ ```bash
564
+ export MODEL_NAME="DeepFloyd/IF-I-XL-v1.0"
565
+ export INSTANCE_DIR="dog"
566
+ export OUTPUT_DIR="dreambooth_if"
567
+
568
+ accelerate launch train_dreambooth.py \
569
+ --pretrained_model_name_or_path=$MODEL_NAME \
570
+ --instance_data_dir=$INSTANCE_DIR \
571
+ --output_dir=$OUTPUT_DIR \
572
+ --instance_prompt="a photo of sks dog" \
573
+ --resolution=64 \
574
+ --train_batch_size=4 \
575
+ --gradient_accumulation_steps=1 \
576
+ --learning_rate=1e-7 \
577
+ --max_train_steps=150 \
578
+ --validation_prompt "a photo of sks dog" \
579
+ --validation_steps 25 \
580
+ --text_encoder_use_attention_mask \
581
+ --tokenizer_max_length 77 \
582
+ --pre_compute_text_embeddings \
583
+ --use_8bit_adam \
584
+ --set_grads_to_none \
585
+ --skip_save_text_encoder \
586
+ --push_to_hub
587
+ ```
588
+
589
+ </hfoption>
590
+ <hfoption id="Stage 2 DreamBooth">
591
+
592
+ 对于DeepFloyd IF的第二阶段DreamBooth,请注意这些参数:
593
+
594
+ * `--learning_rate=5e-6`,使用较低的学习率和较小的有效批次大小
595
+ * `--resolution=256`,上采样器的预期分辨率
596
+ * `--train_batch_size=2` 和 `--gradient_accumulation_steps=6`,为了有效训练包含面部的图像,需要更大的批次大小
597
+
598
+ ```bash
599
+ export MODEL_NAME="DeepFloyd/IF-II-L-v1.0"
600
+ export INSTANCE_DIR="dog"
601
+ export OUTPUT_DIR="dreambooth_dog_upscale"
602
+ export VALIDATION_IMAGES="dog_downsized/image_1.png dog_downsized/image_2.png dog_downsized/image_3.png dog_downsized/image_4.png"
603
+
604
+ accelerate launch train_dreambooth.py \
605
+ --report_to wandb \
606
+ --pretrained_model_name_or_path=$MODEL_NAME \
607
+ --instance_data_dir=$INSTANCE_DIR \
608
+ --output_dir=$OUTPUT_DIR \
609
+ --instance_prompt="a sks dog" \
610
+ --resolution=256 \
611
+ --train_batch_size=2 \
612
+ --gradient_accumulation_steps=6 \
613
+ --learning_rate=5e-6 \
614
+ --max_train_steps=2000 \
615
+ --validation_prompt="a sks dog" \
616
+ --validation_steps=150 \
617
+ --checkpointing_steps=500 \
618
+ --pre_compute_text_embeddings \
619
+ --tokenizer_max_length=77 \
620
+ --text_encoder_use_attention_mask \
621
+ --validation_images $VALIDATION_IMAGES \
622
+ --class_labels_conditioning timesteps \
623
+ --push_to_hub
624
+ ```
625
+
626
+ </hfoption>
627
+ </hfoptions>
628
+
629
+ ### 训练技巧
630
+
631
+ 训练DeepFloyd IF模型可能具有挑战性,但以下是我们发现有用的技巧:
632
+
633
+ - 由于模型的低分辨率本来就难以表现精细的细节,LoRA 对于训练第一阶段模型已经足够。
634
+ - 对于常见或简单的对象,您不一定需要微调上采样器。确保传递给上采样器的提示被调整以移除实例提示中的新令牌。例如,如果您第一阶段提示是"a sks dog",那么您第二阶段的提示应该是"a dog"。
635
+ - 对于面部等更精细的细节,完整训练阶段2上采样器比使用LoRA训练阶段2模型效果更好。使用更大的批次大小和较低的学习率也有帮助。
637
+ - 应使用较低的学习率来训练阶段2模型。
638
+ - [`DDPMScheduler`] 比训练脚本中使用的DPMSolver效果更好。
639
+
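+ 针对最后一条关于调度器的提示,下面是一个简要示意,展示在推理或验证时如何把管道的调度器切换为 [`DDPMScheduler`](仅作演示,沿用原调度器的配置):
+
+ ```py
+ import torch
+ from diffusers import DiffusionPipeline, DDPMScheduler
+
+ pipe = DiffusionPipeline.from_pretrained("DeepFloyd/IF-I-XL-v1.0", torch_dtype=torch.float16)
+ # 用原调度器的配置构造 DDPMScheduler 并替换
+ pipe.scheduler = DDPMScheduler.from_config(pipe.scheduler.config)
+ ```
+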
640
+ ## 下一步
641
+
642
+ 恭喜您训练了您的DreamBooth模型!要了解更多关于如何使用您的新模型的信息,以下指南可能有所帮助:
643
+ - 如果您使用LoRA训练了您的模型,请学习如何[加载DreamBooth](../using-diffusers/loading_adapters)模型进行推理。
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/instructpix2pix.md ADDED
@@ -0,0 +1,255 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
+ the License. You may obtain a copy of the License at
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
+ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
+ specific language governing permissions and limitations under the License.
11
+ -->
12
+
13
+ # InstructPix2Pix
14
+
15
+ [InstructPix2Pix](https://hf.co/papers/2211.09800) 是一个基于 Stable Diffusion 训练的模型,用于根据人类提供的指令编辑图像。例如,您的提示可以是“将云变成雨天”,模型将相应编辑输入图像。该模型以文本提示(或编辑指令)和输入图像为条件。
16
+
17
+ 本指南将探索 [train_instruct_pix2pix.py](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py) 训练脚本,帮助您熟悉它,以及如何将其适应您自己的用例。
18
+
19
+ 在运行脚本之前,请确保从源代码安装库:
20
+
21
+ ```bash
22
+ git clone https://github.com/huggingface/diffusers
23
+ cd diffusers
24
+ pip install .
25
+ ```
26
+
27
+ 然后导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项:
28
+
29
+ ```bash
30
+ cd examples/instruct_pix2pix
31
+ pip install -r requirements.txt
32
+ ```
33
+
34
+ <Tip>
35
+
36
+ 🤗 Accelerate 是一个库,用于帮助您在多个 GPU/TPU 上或使用混合精度进行训练。它将根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate [快速导览](https://huggingface.co/docs/accelerate/quicktour) 以了解更多信息。
37
+
38
+ </Tip>
39
+
40
+ 初始化一个 🤗 Accelerate 环境:
41
+
42
+ ```bash
43
+ accelerate config
44
+ ```
45
+
46
+ 要设置一个默认的 🤗 Accelerate 环境,无需选择任何配置:
47
+
48
+ ```bash
49
+ accelerate config default
50
+ ```
51
+
52
+ 或者,如果您的环境不支持交互式 shell,例如笔记本,您可以使用:
53
+
54
+ ```py
55
+ from accelerate.utils import write_basic_config
56
+
57
+ write_basic_config()
58
+ ```
59
+
60
+ 最后,如果您想在自己的数据集上训练模型,请查看 [创建用于训练的数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。
61
+
62
+ <Tip>
63
+
64
+ 以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未详细涵盖脚本的每个方面。如果您有兴趣了解更多,请随时阅读 [脚本](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix.py),并告诉我们如果您有任何问题或疑虑。
65
+
66
+ </Tip>
67
+
68
+ ## 脚本参数
69
+
70
+ 训练脚本有许多参数可帮助您自定义训练运行。所有
71
+ 参数及其描述可在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L65) 函数中找到。大多数参数都提供了默认值,这些值效果相当不错,但如果您愿意,也可以在训练命令中设置自己的值。
72
+
73
+ 例如,要增加输入图像的分辨率:
74
+
75
+ ```bash
76
+ accelerate launch train_instruct_pix2pix.py \
77
+ --resolution=512 \
78
+ ```
79
+
80
+ 许多基本和重要的参数在 [文本到图像](text2image#script-parameters) 训练指南中已有描述,因此本指南仅关注与 InstructPix2Pix 相关的参数:
81
+
82
+ - `--original_image_column`:编辑前的原始图像
83
+ - `--edited_image_column`:编辑后的图像
84
+ - `--edit_prompt_column`:编辑图像的指令
85
+ - `--conditioning_dropout_prob`:训练期间编辑图像和编辑提示的 dropout 概率,这为一种或两种条件输入启用了无分类器引导(CFG)
86
+
87
+ ## 训练脚本
88
+
89
+ 数据集预处理代码和训练循环可在 [`main()`](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L374) 函数中找到。这是您将修改训练脚本以适应自己用例的地方。
90
+
91
+ 与脚本参数类似,[文本到图像](text2image#training-script) 训练指南提供了训练脚本的逐步说明。相反,本指南将查看脚本中与 InstructPix2Pix 相关的部分。
92
+
93
+ 脚本首先修改 UNet 的第一个卷积层中的 [输入通道数](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L445),以适应 InstructPix2Pix 的额外条件图像:
94
+
95
+ ```py
96
+ in_channels = 8
97
+ out_channels = unet.conv_in.out_channels
98
+ unet.register_to_config(in_channels=in_channels)
99
+
100
+ with torch.no_grad():
101
+ new_conv_in = nn.Conv2d(
102
+ in_channels, out_channels, unet.conv_in.kernel_size, unet.conv_in.stride, unet.conv_in.padding
103
+ )
104
+ new_conv_in.weight.zero_()
105
+ new_conv_in.weight[:, :4, :, :].copy_(unet.conv_in.weight)
106
+ unet.conv_in = new_conv_in
107
+ ```
108
+
109
+ 这些 UNet 参数由优化器 [更新](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L545C1-L551C6):
110
+
111
+ ```py
112
+ optimizer = optimizer_cls(
113
+ unet.parameters(),
114
+ lr=args.learning_rate,
115
+ betas=(args.adam_beta1, args.adam_beta2),
116
+ weight_decay=args.adam_weight_decay,
117
+ eps=args.adam_epsilon,
118
+ )
119
+ ```
120
+
121
+ 接下来,编辑后的图像和编辑指令被 [预处理](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L624)并被[tokenized](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L610C24-L610C24)。重要的是,对原始图像和编辑后的图像应用相同的图像变换。
122
+
123
+ ```py
124
+ def preprocess_train(examples):
125
+ preprocessed_images = preprocess_images(examples)
126
+
127
+ original_images, edited_images = preprocessed_images.chunk(2)
128
+ original_images = original_images.reshape(-1, 3, args.resolution, args.resolution)
129
+ edited_images = edited_images.reshape(-1, 3, args.resolution, args.resolution)
130
+
131
+ examples["original_pixel_values"] = original_images
132
+ examples["edited_pixel_values"] = edited_images
133
+
134
+ captions = list(examples[edit_prompt_column])
135
+ examples["input_ids"] = tokenize_captions(captions)
136
+ return examples
137
+ ```
138
+
139
+ 最后,在[训练循环](https://github.com/huggingface/diffusers/blob/64603389da01082055a901f2883c4810d1144edb/examples/instruct_pix2pix/train_instruct_pix2pix.py#L730)中,它首先将编辑后的图像编码到潜在空间:
140
+
141
+ ```py
142
+ latents = vae.encode(batch["edited_pixel_values"].to(weight_dtype)).latent_dist.sample()
143
+ latents = latents * vae.config.scaling_factor
144
+ ```
145
+
146
+ 然后,脚本对原始图像和编辑指令嵌入应用 dropout 以支持 CFG(Classifier-Free Guidance)。这使得模型能够调节编辑指令和原始图像对编辑后图像的影响。
147
+
148
+ ```py
149
+ encoder_hidden_states = text_encoder(batch["input_ids"])[0]
150
+ original_image_embeds = vae.encode(batch["original_pixel_values"].to(weight_dtype)).latent_dist.mode()
151
+
152
+ if args.conditioning_dropout_prob is not None:
153
+ random_p = torch.rand(bsz, device=latents.device, generator=generator)
154
+ prompt_mask = random_p < 2 * args.conditioning_dropout_prob
155
+ prompt_mask = prompt_mask.reshape(bsz, 1, 1)
156
+ null_conditioning = text_encoder(tokenize_captions([""]).to(accelerator.device))[0]
157
+ encoder_hidden_states = torch.where(prompt_mask, null_conditioning, encoder_hidden_states)
158
+
159
+ image_mask_dtype = original_image_embeds.dtype
160
+ image_mask = 1 - (
161
+ (random_p >= args.conditioning_dropout_prob).to(image_mask_dtype)
162
+ * (random_p < 3 * args.conditioning_dropout_prob).to(image_mask_dtype)
163
+ )
164
+ image_mask = image_mask.reshape(bsz, 1, 1, 1)
165
+ original_image_embeds = image_mask * original_image_embeds
166
+ ```
167
+
168
+ 差不多就是这样了!除了这里描述的不同之处,脚本的其余部分与[文本到图像](text2image#training-script)训练脚本非常相似,所以请随意查看以获取更多细节。如果您想了解更多关于训练循环如何工作的信息,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。
169
+
170
+ ## 启动脚本
171
+
172
+ 一旦您对脚本的更改感到满意,或者如果您对默认配置没问题,您
173
+ 准备好启动训练脚本!🚀
174
+
175
+ 本指南使用 [fusing/instructpix2pix-1000-samples](https://huggingface.co/datasets/fusing/instructpix2pix-1000-samples) 数据集,这是 [原始数据集](https://huggingface.co/datasets/timbrooks/instructpix2pix-clip-filtered) 的一个较小版本。您也可以创建并使用自己的数据集(请参阅 [创建用于训练的数据集](create_dataset) 指南)。
176
+
177
+ 将 `MODEL_NAME` 环境变量设置为模型名称(可以是 Hub 上的模型 ID 或本地模型的路径),并将 `DATASET_ID` 设置为 Hub 上数据集的名称。脚本会创建并保存所有组件(特征提取器、调度器、文本编码器、UNet 等)到您的仓库中的一个子文件夹。
178
+
179
+ <Tip>
180
+
181
+ 为了获得更好的结果,尝试使用更大的数据集进行更长时间的训练。我们只在较小规模的数据集上测试过此训练脚本。
182
+
183
+ <br>
184
+
185
+ 要使用 Weights and Biases 监控训练进度,请将 `--report_to=wandb` 参数添加到训练命令中,并使用 `--val_image_url` 指定验证图像,使用 `--validation_prompt` 指定验证提示。这对于调试模型非常有用。
186
+
187
+ </Tip>
188
+
189
+ 如果您在多个 GPU 上训练,请将 `--multi_gpu` 参数添加到 `accelerate launch` 命令中。
190
+
191
+ ```bash
192
+ accelerate launch --mixed_precision="fp16" train_instruct_pix2pix.py \
193
+ --pretrained_model_name_or_path=$MODEL_NAME \
194
+ --dataset_name=$DATASET_ID \
195
+ --enable_xformers_memory_efficient_attention \
196
+ --resolution=256 \
197
+ --random_flip \
198
+ --train_batch_size=4 \
199
+ --gradient_accumulation_steps=4 \
200
+ --gradient_checkpointing \
201
+ --max_train_steps=15000 \
202
+ --checkpointing_steps=5000 \
203
+ --checkpoints_total_limit=1 \
204
+ --learning_rate=5e-05 \
205
+ --max_grad_norm=1 \
206
+ --lr_warmup_steps=0 \
207
+ --conditioning_dropout_prob=0.05 \
208
+ --mixed_precision=fp16 \
209
+ --seed=42 \
210
+ --push_to_hub
211
+ ```
212
+
213
+ 训练完成后,您可以使用您的新 InstructPix2Pix 进行推理:
214
+
215
+ ```py
216
+ import PIL
217
+ import requests
218
+ import torch
219
+ from diffusers import StableDiffusionInstructPix2PixPipeline
220
+ from diffusers.utils import load_image
221
+
222
+ pipeline = StableDiffusionInstructPix2PixPipeline.from_pretrained("your_cool_model", torch_dtype=torch.float16).to("cuda")
223
+ generator = torch.Generator("cuda").manual_seed(0)
224
+
225
+ image = load_image("https://huggingface.co/datasets/sayakpaul/sample-datasets/resolve/main/test_pix2pix_4.png")
226
+ prompt = "add some ducks to the lake"
227
+ num_inference_steps = 20
228
+ image_guidance_scale = 1.5
229
+ guidance_scale = 10
230
+
231
+ edited_image = pipeline(
232
+ prompt,
233
+ image=image,
234
+ num_inference_steps=num_inference_steps,
235
+ image_guidance_scale=image_guidance_scale,
236
+ guidance_scale=guidance_scale,
237
+ generator=generator,
238
+ ).images[0]
239
+ edited_image.save("edited_image.png")
240
+ ```
241
+
242
+ 您应该尝试不同的 `num_inference_steps`、`image_guidance_scale` 和 `guidance_scale` 值,以查看它们如何影响推理速度和质量。指导比例参数
243
+ 这些参数尤其重要,因为它们控制原始图像和编辑指令对编辑后图像的影响程度。
244
+
245
+ ## Stable Diffusion XL
246
+
247
+ Stable Diffusion XL (SDXL) 是一个强大的文本到图像模型,能够生成高分辨率图像,并在其架构中添加了第二个文本编码器。使用 [`train_instruct_pix2pix_sdxl.py`](https://github.com/huggingface/diffusers/blob/main/examples/instruct_pix2pix/train_instruct_pix2pix_sdxl.py) 脚本来训练 SDXL 模型以遵循图像编辑指令。
248
+
249
+ SDXL 训练脚本在 [SDXL 训练](sdxl) 指南中有更详细的讨论。
250
+
251
+ ## 后续步骤
252
+
253
+ 恭喜您训练了自己的 InstructPix2Pix 模型!🥳 要了解更多关于该模型的信息,可能有助于:
254
+
255
+ - 阅读 [Instruction-tuning Stable Diffusion with InstructPix2Pix](https://huggingface.co/blog/instruction-tuning-sd) 博客文章,了解更多我们使用 InstructPix2Pix 进行的一些实验、数据集准备以及不同指令的结果。
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/kandinsky.md ADDED
@@ -0,0 +1,328 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--版权所有 2025 HuggingFace 团队。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版本("许可证")授权;除非遵守许可证,否则您不得使用此文件。您可以在以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,否则根据许可证分发的软件按"原样"分发,不附带任何明示或暗示的担保或条件。请参阅许可证以了解具体的语言管理权限和限制。
8
+ -->
9
+
10
+ # Kandinsky 2.2
11
+
12
+ <Tip warning={true}>
13
+
14
+ 此脚本是实验性的,容易过拟合并遇到灾难性遗忘等问题。尝试探索不同的超参数以在您的数据集上获得最佳结果。
15
+
16
+ </Tip>
17
+
18
+ Kandinsky 2.2 是一个多语言文本到图像模型,能够生成更逼真的图像。该模型包括一个图像先验模型,用于从文本提示创建图像嵌入,以及一个解码器模型,基于先验模型的嵌入生成图像。这就是为什么在 Diffusers 中您会找到两个独立的脚本用于 Kandinsky 2.2,一个用于训练先验模型,另一个用于训练解码器模型。您可以分别训练这两个模型,但为了获得最佳结果,您应该同时训练先验和解码器模型。
19
+
20
+ 根据您的 GPU,您可能需要启用 `gradient_checkpointing`(⚠️ 不支持先验模型!)、`mixed_precision` 和 `gradient_accumulation_steps` 来帮助将模型装入内存并加速训练。您可以通过启用 [xFormers](../optimization/xformers) 的内存高效注意力来进一步减少内存使用(版本 [v0.0.16](https://github.com/huggingface/diffusers/issues/2234#issuecomment-1416931212) 在某些 GPU 上训练时失败,因此您可能需要安装开发版本)。
21
+
22
+ 本指南探讨了 [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py) 和 [train_text_to_image_decoder.py](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py) 脚本,以帮助您更熟悉它,以及如何根据您的用例进行调整。
23
+
24
+ 在运行脚本之前,请确保从源代码安装库:
25
+
26
+ ```bash
27
+ git clone https://github.com/huggingface/diffusers
28
+ cd diffusers
29
+ pip install .
30
+ ```
31
+
32
+ 然后导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项:
33
+
34
+ ```bash
35
+ cd examples/kandinsky2_2/text_to_image
36
+ pip install -r requirements.txt
37
+ ```
38
+
39
+ <Tip>
40
+
41
+ 🤗 Accelerate 是一个帮助您在多个 GPU/TPU 上或使用混合精度进行训练的库。它会根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate 的 [快速入门](https://huggingface.co/docs/accelerate/quicktour) 了解更多。
43
+
44
+ </Tip>
45
+
46
+ 初始化一个 🤗 Accelerate 环境:
47
+
48
+ ```bash
49
+ accelerate config
50
+ ```
51
+
52
+ 要设置一个默认的 🤗 Accelerate 环境而不选择任何配置:
53
+
54
+ ```bash
55
+ accelerate config default
56
+ ```
57
+
58
+ 或者,如果您的环境不支持交互式 shell,比如 notebook,您可以使用:
59
+
60
+ ```py
61
+ from accelerate.utils import write_basic_config
62
+
63
+ write_basic_config()
64
+ ```
65
+
66
+ 最后,如果您想在自己的数据集上训练模型,请查看 [创建用于训练的数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。
67
+
68
+ <Tip>
69
+
70
+ 以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未详细涵盖脚本的每个方面。如果您有兴趣了解更多,请随时阅读脚本,并让我们知道您有任何疑问或顾虑。
71
+
72
+ </Tip>
73
+
74
+ ## 脚本参数
75
+
76
+ 训练脚本提供了许多参数来帮助您自定义训练运行。所有参数及其描述都可以在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L190) 函数中找到。训练脚本为每个参数提供了默认值,例如训练批次大小和学习率,但如果您愿意,也可以在训练命令中设置自己的值。
77
+
78
+ 例如,要使用 fp16 格式的混合精度加速训练,请在训练命令中添加 `--mixed_precision` 参数:
79
+
80
+ ```bash
81
+ accelerate launch train_text_to_image_prior.py \
82
+ --mixed_precision="fp16"
83
+ ```
84
+
85
+ 大多数参数与 [文本到图像](text2image#script-parameters) 训练指南中的参数相同,所以让我们直接进入 Kandinsky 训练脚本的 walkthrough!
86
+
87
+ ### Min-SNR 加权
88
+
89
+ [Min-SNR](https://huggingface.co/papers/2303.09556) 加权策略可以通过重新平衡损失来帮助训练,实现更快的收敛。训练脚本支持预测 `epsilon`(噪声)或 `v_prediction`,但 Min-SNR 与两种预测类型都兼容。此加权策略仅由 PyTorch 支持,在 Flax 训练脚本中不可用。
90
+
91
+ 添加 `--snr_gamma` 参数并将其设置为推荐值 5.0:
92
+
93
+ ```bash
94
+ accelerate launch train_text_to_image_prior.py \
95
+ --snr_gamma=5.0
96
+ ```
97
+
98
+ ## 训练脚本
99
+
100
+ 训练脚本也类似于 [文本到图像](text2image#training-script) 训练指南,但已修改以支持训练 prior 和 decoder 模型。本指南重点介绍 Kandinsky 2.2 训练脚本中独特的代码。
101
+
102
+ <hfoptions id="script">
103
+ <hfoption id="prior model">
104
+
105
+ [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L441) 函数包含准备数据集和训练模型的代码。
107
+
108
+ 您会立即注意到的主要区别之一是,训练脚本除了调度器和分词器外,还加载了一个 [`~transformers.CLIPImageProcessor`] 用于预处理图像,以及一个 [`~transformers.CLIPVisionModelWithProjection`] 模型用于编码图像:
109
+
110
+ ```py
111
+ noise_scheduler = DDPMScheduler(beta_schedule="squaredcos_cap_v2", prediction_type="sample")
112
+ image_processor = CLIPImageProcessor.from_pretrained(
113
+ args.pretrained_prior_model_name_or_path, subfolder="image_processor"
114
+ )
115
+ tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="tokenizer")
116
+
117
+ with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
118
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
119
+ args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
120
+ ).eval()
121
+ text_encoder = CLIPTextModelWithProjection.from_pretrained(
122
+ args.pretrained_prior_model_name_or_path, subfolder="text_encoder", torch_dtype=weight_dtype
123
+ ).eval()
124
+ ```
125
+
126
+ Kandinsky 使用一个 [`PriorTransformer`] 来生成图像嵌入,因此您需要设置优化器来学习先验模型的参数。
127
+
128
+ ```py
129
+ prior = PriorTransformer.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior")
130
+ prior.train()
131
+ optimizer = optimizer_cls(
132
+ prior.parameters(),
133
+ lr=args.learning_rate,
134
+ betas=(args.adam_beta1, args.adam_beta2),
135
+ weight_decay=args.adam_weight_decay,
136
+ eps=args.adam_epsilon,
137
+ )
138
+ ```
139
+
140
+ 接下来,输入标题被分词,图像由 [`~transformers.CLIPImageProcessor`] [预处理](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L632):
141
+
142
+ ```py
143
+ def preprocess_train(examples):
144
+ images = [image.convert("RGB") for image in examples[image_column]]
145
+ examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values
146
+ examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples)
147
+ return examples
148
+ ```
149
+
150
+ 最后,[训练循环](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_prior.py#L718) 将输入图像转换为潜在表示,向图像嵌入添加噪声,并进行预测:
151
+
152
+ ```py
153
+ model_pred = prior(
154
+ noisy_latents,
155
+ timestep=timesteps,
156
+ proj_embedding=prompt_embeds,
157
+ encoder_hidden_states=text_encoder_hidden_states,
158
+ attention_mask=text_mask,
159
+ ).predicted_image_embedding
160
+ ```
161
+
162
+ 如果您想了解更多关于训练循环的工作原理,请查看 [理解管道、模型和调度器](../using-diffusers/write_own_pipeline) 教程,该教程分解了去噪过程的基本模式。
163
+
164
+ </hfoption>
165
+ <hfoption id="decoder model">
166
+
167
+ [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L440) 函数包含准备数据集和训练模型的代码。
169
+
170
+ 与之前的模型不同,解码器初始化一个 [`VQModel`] 来将潜在变量解码为图像,并使用一个 [`UNet2DConditionModel`]:
171
+
172
+ ```py
173
+ with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
174
+ vae = VQModel.from_pretrained(
175
+ args.pretrained_decoder_model_name_or_path, subfolder="movq", torch_dtype=weight_dtype
176
+ ).eval()
177
+ image_encoder = CLIPVisionModelWithProjection.from_pretrained(
178
+ args.pretrained_prior_model_name_or_path, subfolder="image_encoder", torch_dtype=weight_dtype
179
+ ).eval()
180
+ unet = UNet2DConditionModel.from_pretrained(args.pretrained_decoder_model_name_or_path, subfolder="unet")
181
+ ```
182
+
183
+ 接下来,脚本包括几个图像变换和一个用于对图像应用变换并返回像素值的[预处理](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L622)函数:
184
+
185
+ ```py
186
+ def preprocess_train(examples):
187
+ images = [image.convert("RGB") for image in examples[image_column]]
188
+ examples["pixel_values"] = [train_transforms(image) for image in images]
189
+ examples["clip_pixel_values"] = image_processor(images, return_tensors="pt").pixel_values
190
+ return examples
191
+ ```
192
+
193
+ 最后,[训练循环](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/kandinsky2_2/text_to_image/train_text_to_image_decoder.py#L706)处理将图像转换为潜在变量、添加噪声和预测噪声残差。
194
+
195
+ 如果您想了解更多关于训练循环如何工作的信息,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。
196
+
197
+ ```py
198
+ model_pred = unet(noisy_latents, timesteps, None, added_cond_kwargs=added_cond_kwargs).sample[:, :4]
199
+ ```
200
+
201
+ </hfoption>
202
+ </hfoptions>
203
+
204
+ ## 启动脚本
205
+
206
+ 一旦您完成了所有更改或接受默认配置,就可以启动训练脚本了!🚀
207
+
208
+ 您将在[Naruto BLIP 字幕](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions)数据集上进行训练,以生成您自己的Naruto角色,但您也可以通过遵循[创建用于训练的数据集](create_dataset)指南来创建和训练您自己的数据集。将环境变量 `DATASET_NAME` 设置为Hub上数据集的名称,或者如果您在自己的文件上训练,将环境变量 `TRAIN_DIR` 设置为数据集的路径。
209
+
210
+ 如果您在多个GPU上训练,请在 `accelerate launch` 命令中添加 `--multi_gpu` 参数。
211
+
212
+ <Tip>
213
+
214
+ 要使用Weights & Biases监控训练进度,请在训练命令中添加 `--report_to=wandb` 参数。还建议在训练命令中添加 `--validation_prompt` 以跟踪结果,这对于调试模型和查看中间结果非常有用。
216
+
217
+ </Tip>
218
+
219
+ <hfoptions id="training-inference">
220
+ <hfoption id="prior model">
221
+
222
+ ```bash
223
+ export DATASET_NAME="lambdalabs/naruto-blip-captions"
224
+
225
+ accelerate launch --mixed_precision="fp16" train_text_to_image_prior.py \
226
+ --dataset_name=$DATASET_NAME \
227
+ --resolution=768 \
228
+ --train_batch_size=1 \
229
+ --gradient_accumulation_steps=4 \
230
+ --max_train_steps=15000 \
231
+ --learning_rate=1e-05 \
232
+ --max_grad_norm=1 \
233
+ --checkpoints_total_limit=3 \
234
+ --lr_scheduler="constant" \
235
+ --lr_warmup_steps=0 \
236
+ --validation_prompts="A robot naruto, 4k photo" \
237
+ --report_to="wandb" \
238
+ --push_to_hub \
239
+ --output_dir="kandi2-prior-naruto-model"
240
+ ```
241
+
242
+ </hfoption>
243
+ <hfoption id="decoder model">
244
+
245
+ ```bash
246
+ export DATASET_NAME="lambdalabs/naruto-blip-captions"
247
+
248
+ accelerate launch --mixed_precision="fp16" train_text_to_image_decoder.py \
249
+ --dataset_name=$DATASET_NAME \
250
+ --resolution=768 \
251
+ --train_batch_size=1 \
252
+ --gradient_accumulation_steps=4 \
253
+ --gradient_checkpointing \
254
+ --max_train_steps=15000 \
255
+ --learning_rate=1e-05 \
256
+ --max_grad_norm=1 \
257
+ --checkpoints_total_limit=3 \
258
+ --lr_scheduler="constant" \
259
+ --lr_warmup_steps=0 \
260
+ --validation_prompts="A robot naruto, 4k photo" \
261
+ --report_to="wandb" \
262
+ --push_to_hub \
263
+ --output_dir="kandi2-decoder-naruto-model"
264
+ ```
265
+
266
+ </hfoption>
267
+ </hfoptions>
268
+
269
+ 训练完成后,您可以使用新训练的模型进行推理!
270
+
271
+ <hfoptions id="training-inference">
272
+ <hfoption id="prior model">
273
+
274
+ ```py
275
+ from diffusers import AutoPipelineForText2Image, DiffusionPipeline
276
+ import torch
277
+
278
+ prior_pipeline = DiffusionPipeline.from_pretrained(output_dir, torch_dtype=torch.float16)
279
+ prior_components = {"prior_" + k: v for k,v in prior_pipeline.components.items()}
280
+ pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", **prior_components, torch_dtype=torch.float16)
281
+
282
+ pipeline.enable_model_cpu_offload()
+ prompt = "A robot naruto, 4k photo"
+ image = pipeline(prompt=prompt).images[0]
285
+ ```
286
+
287
+ <Tip>
288
+
289
+ 可以随意将 `kandinsky-community/kandinsky-2-2-decoder` 替换为您自己训练的 decoder 检查点!
290
+
291
+ </Tip>
292
+
293
+ </hfoption>
294
+ <hfoption id="decoder model">
295
+
296
+ ```py
297
+ from diffusers import AutoPipelineForText2Image
298
+ import torch
299
+
300
+ pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16)
301
+ pipeline.enable_model_cpu_offload()
302
+
303
+ prompt="A robot naruto, 4k photo"
304
+ image = pipeline(prompt=prompt).images[0]
305
+ ```
306
+
307
+ 对于 decoder 模型,您还可以从保存的检查点进行推理,这对于查看中间结果很有用。在这种情况下,将检查点加载到 UNet 中:
308
+
309
+ ```py
310
+ from diffusers import AutoPipelineForText2Image, UNet2DConditionModel
311
+
312
+ unet = UNet2DConditionModel.from_pretrained("path/to/saved/model" + "/checkpoint-<N>/unet")
313
+
314
+ pipeline = AutoPipelineForText2Image.from_pretrained("kandinsky-community/kandinsky-2-2-decoder", unet=unet, torch_dtype=torch.float16)
315
+ pipeline.enable_model_cpu_offload()
316
+
317
+ image = pipeline(prompt="A robot naruto, 4k photo").images[0]
318
+ ```
319
+
320
+ </hfoption>
321
+ </hfoptions>
322
+
323
+ ## 后续步骤
324
+
325
+ 恭喜您训练了一个 Kandinsky 2.2 模型!要了解更多关于如何使用您的新模型的信息,以下指南可能会有所帮助:
326
+
327
+ - 阅读 [Kandinsky](../using-diffusers/kandinsky) 指南,学习如何将其用于各种不同的任务(文本到图像、图像到图像、修复、插值),以及如何与 ControlNet 结合使用。
328
+ - 查看 [DreamBooth](dreambooth) 和 [LoRA](lora) 训练指南,学习如何使用少量示例图像训练个性化的 Kandinsky 模型。这两种训练技术甚至可以结合使用!
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/lora.md ADDED
@@ -0,0 +1,231 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
+ the License. You may obtain a copy of the License at
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
+ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
+ specific language governing permissions and limitations under the License.
11
+ -->
12
+
13
+ # LoRA 低秩适配
14
+
15
+ <Tip warning={true}>
16
+
17
+ 当前功能处于实验阶段,API可能在未来版本中变更。
18
+
19
+ </Tip>
20
+
21
+ [LoRA(大语言模型的低秩适配)](https://hf.co/papers/2106.09685) 是一种轻量级训练技术,能显著减少可训练参数量。其原理是通过向模型注入少量新权重参数,仅训练这些新增参数。这使得LoRA训练速度更快、内存效率更高,并生成更小的模型权重文件(通常仅数百MB),便于存储和分享。LoRA还可与DreamBooth等其他训练技术结合以加速训练过程。
22
+
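+ 下面用一个极简的数值示意说明低秩分解为何能大幅减少可训练参数(维度与初始化均为假设值,并省略了 `lora_alpha/r` 缩放,仅用于演示概念):
+
+ ```py
+ import torch
+
+ d_out, d_in, rank = 320, 768, 4                          # 假设的层维度与 LoRA 秩
+ W = torch.randn(d_out, d_in)                             # 原始权重,训练时保持冻结
+ A = (0.01 * torch.randn(rank, d_in)).requires_grad_()    # 低秩矩阵 A(高斯初始化)
+ B = torch.zeros(d_out, rank, requires_grad=True)         # 低秩矩阵 B(零初始化,初始增量为 0)
+
+ x = torch.randn(1, d_in)
+ y = x @ W.T + (x @ A.T) @ B.T                            # 前向:W·x + (B·A)·x
+
+ print(W.numel())              # 245760 个冻结参数
+ print(A.numel() + B.numel())  # 4352 个可训练参数
+ ```
+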
23
+ <Tip>
24
+
25
+ LoRA具有高度通用性,目前已支持以下应用场景:[DreamBooth](https://github.com/huggingface/diffusers/blob/main/examples/dreambooth/train_dreambooth_lora.py)、[Kandinsky 2.2](https://github.com/huggingface/diffusers/blob/main/examples/kandinsky2_2/text_to_image/train_text_to_image_lora_decoder.py)、[Stable Diffusion XL](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora_sdxl.py)、[文生图](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)以及[Wuerstchen](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_lora_prior.py)。
26
+
27
+ </Tip>
28
+
29
+ 本指南将通过解析[train_text_to_image_lora.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py)脚本,帮助您深入理解其工作原理,并掌握如何针对具体需求进行定制化修改。
30
+
31
+ 运行脚本前,请确保从源码安装库:
32
+
33
+ ```bash
34
+ git clone https://github.com/huggingface/diffusers
35
+ cd diffusers
36
+ pip install .
37
+ ```
38
+
39
+ 进入包含训练脚本的示例目录,并安装所需依赖:
40
+
41
+ <hfoptions id="installation">
42
+ <hfoption id="PyTorch">
43
+
44
+ ```bash
45
+ cd examples/text_to_image
46
+ pip install -r requirements.txt
47
+ ```
48
+
49
+ </hfoption>
50
+ <hfoption id="Flax">
51
+
52
+ ```bash
53
+ cd examples/text_to_image
54
+ pip install -r requirements_flax.txt
55
+ ```
56
+
57
+ </hfoption>
58
+ </hfoptions>
59
+
60
+ <Tip>
61
+
62
+ 🤗 Accelerate是一个支持多GPU/TPU训练和混合精度计算的库,它能根据硬件环境自动配置训练方案。参阅🤗 Accelerate[快速入门](https://huggingface.co/docs/accelerate/quicktour)了解更多。
63
+
64
+ </Tip>
65
+
66
+ 初始化🤗 Accelerate环境:
67
+
68
+ ```bash
69
+ accelerate config
70
+ ```
71
+
72
+ 若要创建默认配置环境(不进行交互式设置):
73
+
74
+ ```bash
75
+ accelerate config default
76
+ ```
77
+
78
+ 若在非交互环境(如Jupyter notebook)中使用:
79
+
80
+ ```py
81
+ from accelerate.utils import write_basic_config
82
+
83
+ write_basic_config()
84
+ ```
85
+
86
+ 如需训练自定义数据集,请参考[创建训练数据集指南](create_dataset)了解数据准备流程。
87
+
88
+ <Tip>
89
+
90
+ 以下章节重点解析训练脚本中与LoRA相关的核心部分,但不会涵盖所有实现细节。如需完整理解,建议直接阅读[脚本源码](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image_lora.py),如有疑问欢迎反馈。
91
+
92
+ </Tip>
93
+
94
+ ## 脚本参数
95
+
96
+ 训练脚本提供众多参数用于定制训练过程。所有参数及其说明均定义在[`parse_args()`](https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L85)函数中。多数参数设有默认值,您也可以通过命令行参数覆盖:
97
+
98
+ 例如增加训练轮次:
99
+
100
+ ```bash
101
+ accelerate launch train_text_to_image_lora.py \
102
+ --num_train_epochs=150 \
103
+ ```
104
+
105
+ 基础参数说明可参考[文生图训练指南](text2image#script-parameters),此处重点介绍LoRA相关参数:
106
+
107
+ - `--rank`:低秩矩阵的内部维度,数值越高可训练参数越多
108
+ - `--learning_rate`:默认学习率为1e-4,但使用LoRA时可适当提高
109
+
110
+ ## 训练脚本实现
111
+
112
+ 数据集预处理和训练循环逻辑位于[`main()`](https://github.com/huggingface/diffusers/blob/dd9a5caf61f04d11c0fa9f3947b69ab0010c9a0f/examples/text_to_image/train_text_to_image_lora.py#L371)函数,如需定制训练流程,可在此处进行修改。
113
+
114
+ 与参数说明类似,训练流程的完整解析请参考[文生图指南](text2image#training-script),下文重点介绍LoRA相关实现。
115
+
116
+ <hfoptions id="lora">
117
+ <hfoption id="UNet">
118
+
119
+ Diffusers使用[PEFT](https://hf.co/docs/peft)库的[`~peft.LoraConfig`]配置LoRA适配器参数,包括秩(rank)、alpha值以及目标模块。适配器被注入UNet后,通过`lora_layers`筛选出需要优化的LoRA层。
120
+
121
+ ```py
122
+ unet_lora_config = LoraConfig(
123
+ r=args.rank,
124
+ lora_alpha=args.rank,
125
+ init_lora_weights="gaussian",
126
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
127
+ )
128
+
129
+ unet.add_adapter(unet_lora_config)
130
+ lora_layers = filter(lambda p: p.requires_grad, unet.parameters())
131
+ ```
132
+
133
+ </hfoption>
134
+ <hfoption id="text encoder">
135
+
136
+ 当需要微调文本编码器时(如SDXL模型),Diffusers同样支持通过[PEFT](https://hf.co/docs/peft)库实现。[`~peft.LoraConfig`]配置适配器参数后注入文本编码器,并筛选LoRA层进行训练。
137
+
138
+ ```py
139
+ text_lora_config = LoraConfig(
140
+ r=args.rank,
141
+ lora_alpha=args.rank,
142
+ init_lora_weights="gaussian",
143
+ target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
144
+ )
145
+
146
+ text_encoder_one.add_adapter(text_lora_config)
147
+ text_encoder_two.add_adapter(text_lora_config)
148
+ text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters()))
149
+ text_lora_parameters_two = list(filter(lambda p: p.requires_grad, text_encoder_two.parameters()))
150
+ ```
151
+
152
+ </hfoption>
153
+ </hfoptions>
154
+
155
+ [优化器](https://github.com/huggingface/diffusers/blob/e4b8f173b97731686e290b2eb98e7f5df2b1b322/examples/text_to_image/train_text_to_image_lora.py#L529)仅对`lora_layers`参数进行优化:
156
+
157
+ ```py
158
+ optimizer = optimizer_cls(
159
+ lora_layers,
160
+ lr=args.learning_rate,
161
+ betas=(args.adam_beta1, args.adam_beta2),
162
+ weight_decay=args.adam_weight_decay,
163
+ eps=args.adam_epsilon,
164
+ )
165
+ ```
166
+
167
+ 除LoRA层设置外,该训练脚本与标准train_text_to_image.py基本相同!
168
+
169
+ ## 启动训练
170
+
171
+ 完成所有配置后,即可启动训练脚本!🚀
172
+
173
+ 以下示例使用[Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions)训练生成火影角色。请设置环境变量`MODEL_NAME`和`DATASET_NAME`指定基础模型和数据集,`OUTPUT_DIR`设置输出目录,`HUB_MODEL_ID`指定Hub存储库名称。脚本运行后将生成以下文件:
174
+
175
+ - 模型检查点
176
+ - `pytorch_lora_weights.safetensors`(训练好的LoRA权重)
177
+
178
+ 多GPU训练请添加`--multi_gpu`参数。
179
+
180
+ <Tip warning={true}>
181
+
182
+ 在11GB显存的2080 Ti显卡上完整训练约需5小时。
183
+
184
+ </Tip>
185
+
186
+ ```bash
187
+ export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
188
+ export OUTPUT_DIR="/sddata/finetune/lora/naruto"
189
+ export HUB_MODEL_ID="naruto-lora"
190
+ export DATASET_NAME="lambdalabs/naruto-blip-captions"
191
+
192
+ accelerate launch --mixed_precision="fp16" train_text_to_image_lora.py \
193
+ --pretrained_model_name_or_path=$MODEL_NAME \
194
+ --dataset_name=$DATASET_NAME \
195
+ --dataloader_num_workers=8 \
196
+ --resolution=512 \
197
+ --center_crop \
198
+ --random_flip \
199
+ --train_batch_size=1 \
200
+ --gradient_accumulation_steps=4 \
201
+ --max_train_steps=15000 \
202
+ --learning_rate=1e-04 \
203
+ --max_grad_norm=1 \
204
+ --lr_scheduler="cosine" \
205
+ --lr_warmup_steps=0 \
206
+ --output_dir=${OUTPUT_DIR} \
207
+ --push_to_hub \
208
+ --hub_model_id=${HUB_MODEL_ID} \
209
+ --report_to=wandb \
210
+ --checkpointing_steps=500 \
211
+ --validation_prompt="A naruto with blue eyes" \
212
+ --seed=1337
213
+ ```
214
+
215
+ 训练完成后,您可以通过以下方式进行推理:
216
+
217
+ ```py
218
+ from diffusers import AutoPipelineForText2Image
219
+ import torch
220
+
221
+ pipeline = AutoPipelineForText2Image.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
222
+ pipeline.load_lora_weights("path/to/lora/model", weight_name="pytorch_lora_weights.safetensors")
223
+ image = pipeline("A naruto with blue eyes").images[0]
224
+ ```
225
+
226
+ ## 后续步骤
227
+
228
+ 恭喜完成LoRA模型训练!如需进一步了解模型使用方法,可参考以下指南:
229
+
230
+ - 学习如何加载[不同格式的LoRA权重](../using-diffusers/loading_adapters#LoRA)(如Kohya或TheLastBen训练的模型)
231
+ - 掌握使用PEFT进行[多LoRA组合推理](../tutorials/using_peft_for_inference)的技巧
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/overview.md ADDED
@@ -0,0 +1,60 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ 根据 Apache License 2.0 版本("许可证")授权,除非符合许可证要求,否则不得使用此文件。您可以通过以下网址获取许可证副本:
4
+
5
+ http://www.apache.org/licenses/LICENSE-2.0
6
+
7
+ 除非适用法律要求或书面同意,本软件按"原样"分发,不附带任何明示或暗示的担保或条件。详见许可证中规定的特定语言权限和限制。
8
+ -->
9
+
10
+ # 概述
11
+
12
+ 🤗 Diffusers 提供了一系列训练脚本供您训练自己的diffusion模型。您可以在 [diffusers/examples](https://github.com/huggingface/diffusers/tree/main/examples) 找到所有训练脚本。
13
+
14
+ 每个训练脚本具有以下特点:
15
+
16
+ - **独立完整**:训练脚本不依赖任何本地文件,所有运行所需的包都通过 `requirements.txt` 文件安装
17
+ - **易于调整**:这些脚本是针对特定任务的训练示例,并不能开箱即用地适用于所有训练场景。您可能需要根据具体用例调整脚本。为此,我们完全公开了数据预处理代码和训练循环,方便您进行修改
18
+ - **新手友好**:脚本设计注重易懂性和入门友好性,而非包含最新最优方法以获得最具竞争力的结果。我们有意省略了过于复杂的训练方法
19
+ - **单一用途**:每个脚本仅针对一个任务设计,确保代码可读性和可理解性
20
+
21
+ 当前提供的训练脚本包括:
22
+
23
+ | 训练类型 | 支持SDXL | 支持LoRA | 支持Flax |
24
+ |---|---|---|---|
25
+ | [unconditional image generation](https://github.com/huggingface/diffusers/tree/main/examples/unconditional_image_generation) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb) | | | |
26
+ | [text-to-image](https://github.com/huggingface/diffusers/tree/main/examples/text_to_image) | 👍 | 👍 | 👍 |
27
+ | [textual inversion](https://github.com/huggingface/diffusers/tree/main/examples/textual_inversion) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb) | | | 👍 |
28
+ | [DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth) [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb) | 👍 | 👍 | 👍 |
29
+ | [ControlNet](https://github.com/huggingface/diffusers/tree/main/examples/controlnet) | 👍 | | 👍 |
30
+ | [InstructPix2Pix](https://github.com/huggingface/diffusers/tree/main/examples/instruct_pix2pix) | 👍 | | |
31
+ | [Custom Diffusion](https://github.com/huggingface/diffusers/tree/main/examples/custom_diffusion) | | | |
32
+ | [T2I-Adapters](https://github.com/huggingface/diffusers/tree/main/examples/t2i_adapter) | 👍 | | |
33
+ | [Kandinsky 2.2](https://github.com/huggingface/diffusers/tree/main/examples/kandinsky2_2/text_to_image) | | 👍 | |
34
+ | [Wuerstchen](https://github.com/huggingface/diffusers/tree/main/examples/wuerstchen/text_to_image) | | 👍 | |
35
+
36
+ 这些示例处于**积极维护**状态,如果遇到问题请随时提交issue。如果您认为应该添加其他训练示例,欢迎创建[功能请求](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=)与我们讨论,我们将评估其是否符合独立完整、易于调整、新手友好和单一用途的标准。
37
+
38
+ ## 安装
39
+
40
+ 请按照以下步骤在新虚拟环境中从源码安装库,确保能成功运行最新版本的示例脚本:
41
+
42
+ ```bash
43
+ git clone https://github.com/huggingface/diffusers
44
+ cd diffusers
45
+ pip install .
46
+ ```
47
+
48
+ 然后进入具体训练脚本目录(例如[DreamBooth](https://github.com/huggingface/diffusers/tree/main/examples/dreambooth)),安装对应的`requirements.txt`文件。部分脚本针对SDXL、LoRA或Flax有特定要求文件,使用时请确保安装对应文件。
49
+
50
+ ```bash
51
+ cd examples/dreambooth
52
+ pip install -r requirements.txt
53
+ # 如需用DreamBooth训练SDXL
54
+ pip install -r requirements_sdxl.txt
55
+ ```
56
+
57
+ 为加速训练并降低内存消耗,我们建议:
58
+
59
+ - 使用PyTorch 2.0或更高版本,自动启用[缩放点积注意力](../optimization/fp16#scaled-dot-product-attention)(无需修改训练代码)
60
+ - 安装[xFormers](../optimization/xformers)以启用内存高效注意力机制
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/text2image.md ADDED
@@ -0,0 +1,275 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
+ the License. You may obtain a copy of the License at
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
+ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
+ specific language governing permissions and limitations under the License.
11
+ -->
12
+
13
+ # 文生图
14
+
15
+ <Tip warning={true}>
16
+
17
+ 文生图训练脚本目前处于实验阶段,容易出现过拟合和灾难性遗忘等问题。建议尝试不同超参数以获得最佳数据集适配效果。
18
+
19
+ </Tip>
20
+
21
+ Stable Diffusion 等文生图模型能够根据文本提示生成对应图像。
22
+
23
+ 模型训练对硬件要求较高,但启用 `gradient_checkpointing` 和 `mixed_precision` 后,可在单块24GB显存GPU上完成训练。如需更大批次或更快训练速度,建议使用30GB以上显存的GPU设备。通过启用 [xFormers](../optimization/xformers) 内存高效注意力机制可降低显存占用。JAX/Flax 训练方案也支持TPU/GPU高效训练,但不支持梯度检查点、梯度累积和xFormers。使用Flax训练时建议配备30GB以上显存GPU或TPU v3。
24
+
25
+ 本指南将详解 [train_text_to_image.py](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py) 训练脚本,助您掌握其原理并适配自定义需求。
26
+
27
+ 运行脚本前请确保已从源码安装库:
28
+
29
+ ```bash
30
+ git clone https://github.com/huggingface/diffusers
31
+ cd diffusers
32
+ pip install .
33
+ ```
34
+
35
+ 然后进入包含训练脚本的示例目录,安装对应依赖:
36
+
37
+ <hfoptions id="installation">
38
+ <hfoption id="PyTorch">
39
+ ```bash
40
+ cd examples/text_to_image
41
+ pip install -r requirements.txt
42
+ ```
43
+ </hfoption>
44
+ <hfoption id="Flax">
45
+ ```bash
46
+ cd examples/text_to_image
47
+ pip install -r requirements_flax.txt
48
+ ```
49
+ </hfoption>
50
+ </hfoptions>
51
+
52
+ <Tip>
53
+
54
+ 🤗 Accelerate 是支持多GPU/TPU训练和混合精度的工具库,能根据硬件环境自动配置训练参数。参阅 🤗 Accelerate [快速入门](https://huggingface.co/docs/accelerate/quicktour) 了解更多。
55
+
56
+ </Tip>
57
+
58
+ 初始化 🤗 Accelerate 环境:
59
+
60
+ ```bash
61
+ accelerate config
62
+ ```
63
+
64
+ 要创建默认配置环境(不进行交互式选择):
65
+
66
+ ```bash
67
+ accelerate config default
68
+ ```
69
+
70
+ 若环境不支持交互式shell(如notebook),可使用:
71
+
72
+ ```py
73
+ from accelerate.utils import write_basic_config
74
+
75
+ write_basic_config()
76
+ ```
77
+
78
+ 最后,如需在自定义数据集上训练,请参阅 [创建训练数据集](create_dataset) 指南了解如何准备适配脚本的数据集。
79
+
80
+ ## 脚本参数
81
+
82
+ <Tip>
83
+
84
+ 以下重点介绍脚本中影响训练效果的关键参数,如需完整参数说明可查阅 [脚本源码](https://github.com/huggingface/diffusers/blob/main/examples/text_to_image/train_text_to_image.py)。如有疑问欢迎反馈。
85
+
86
+ </Tip>
87
+
88
+ 训练脚本提供丰富参数供自定义训练流程,所有参数及说明详见 [`parse_args()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L193) 函数。该函数为每个参数提供默认值(如批次大小、学习率等),也可通过命令行参数覆盖。
89
+
90
+ 例如使用fp16混合精度加速训练:
91
+
92
+ ```bash
93
+ accelerate launch train_text_to_image.py \
94
+ --mixed_precision="fp16"
95
+ ```
96
+
97
+ 基础重要参数包括:
98
+
99
+ - `--pretrained_model_name_or_path`: Hub模型名称或本地预训练模型路径
100
+ - `--dataset_name`: Hub数据集名称或本地训练数据集路径
101
+ - `--image_column`: 数据集中图像列名
102
+ - `--caption_column`: 数据集中文本列名
103
+ - `--output_dir`: 模型保存路径
104
+ - `--push_to_hub`: 是否将训练模型推送至Hub
105
+ - `--checkpointing_steps`: 模型检查点保存步数;训练中断时可添加 `--resume_from_checkpoint` 从该检查点恢复训练
106
+
107
+ ### Min-SNR加权策略
108
+
109
+ [Min-SNR](https://huggingface.co/papers/2303.09556) 加权策略通过重新平衡损失函数加速模型收敛。训练脚本支持预测 `epsilon`(噪声)或 `v_prediction`,而Min-SNR兼容两种预测类型。该策略仅限PyTorch版本,Flax训练脚本不支持。
110
+
111
+ 添加 `--snr_gamma` 参数并设为推荐值5.0:
112
+
113
+ ```bash
114
+ accelerate launch train_text_to_image.py \
115
+ --snr_gamma=5.0
116
+ ```
117
+
118
+ 可通过此 [Weights and Biases](https://wandb.ai/sayakpaul/text2image-finetune-minsnr) 报告比较不同 `snr_gamma` 值的损失曲面。小数据集上Min-SNR效果可能不如大数据集显著。
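+
+ 作为概念性示意(并非脚本原文,假设为 `epsilon` 预测情形),Min-SNR 相当于按 `min(SNR, γ)/SNR` 对各时间步的 MSE 损失重新加权:
+
+ ```py
+ import torch
+
+ def min_snr_weight(snr: torch.Tensor, snr_gamma: float = 5.0) -> torch.Tensor:
+     # epsilon 预测时的损失权重:min(SNR, gamma) / SNR
+     return torch.clamp(snr, max=snr_gamma) / snr
+
+ snr = torch.tensor([0.1, 1.0, 25.0])   # 假设的三个时间步信噪比
+ print(min_snr_weight(snr))             # tensor([1.0000, 1.0000, 0.2000]):高 SNR(低噪声)步被降权
+ ```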
119
+
120
+ ## 训练脚本解析
121
+
122
+ 数据集预处理代码和训练循环位于 [`main()`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L490) 函数,自定义修改需在此处进行。
123
+
124
+ `train_text_to_image` 脚本首先 [加载调度器](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L543) 和分词器,此处可替换其他调度器:
125
+
126
+ ```py
127
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
128
+ tokenizer = CLIPTokenizer.from_pretrained(
129
+ args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
130
+ )
131
+ ```
132
+
133
+ 接着 [加载UNet模型](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L619):
134
+
135
+ ```py
136
+ load_model = UNet2DConditionModel.from_pretrained(input_dir, subfolder="unet")
137
+ model.register_to_config(**load_model.config)
138
+
139
+ model.load_state_dict(load_model.state_dict())
140
+ ```
141
+
142
+ 随后对数据集的文本和图像列进行预处理。[`tokenize_captions`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L724) 函数处理文本分词,[`train_transforms`](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L742) 定义图像增强策略,二者集成于 `preprocess_train`:
143
+
144
+ ```py
145
+ def preprocess_train(examples):
146
+ images = [image.convert("RGB") for image in examples[image_column]]
147
+ examples["pixel_values"] = [train_transforms(image) for image in images]
148
+ examples["input_ids"] = tokenize_captions(examples)
149
+ return examples
150
+ ```
151
+
152
+ 最后,[训练循环](https://github.com/huggingface/diffusers/blob/8959c5b9dec1c94d6ba482c94a58d2215c5fd026/examples/text_to_image/train_text_to_image.py#L878) 处理剩余流程:图像编码为潜空间、添加噪声、计算文本嵌入条件、更新模型参数、保存并推送模型至Hub。想深入了解训练循环原理,可参阅 [理解管道、模型与调度器](../using-diffusers/write_own_pipeline) 教程,该教程解析了去噪过程的核心逻辑。
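+
+ 下面给出训练循环核心数据流的简化示意(仅为帮助理解的草图:假设 `vae`、`text_encoder`、`unet`、`noise_scheduler`、`optimizer` 与 `batch` 已按上文准备好,且为 `epsilon` 预测,省略了混合精度、梯度累积、EMA 等细节):
+
+ ```py
+ import torch
+ import torch.nn.functional as F
+
+ latents = vae.encode(batch["pixel_values"]).latent_dist.sample() * vae.config.scaling_factor
+ noise = torch.randn_like(latents)                                           # 随机噪声
+ timesteps = torch.randint(
+     0, noise_scheduler.config.num_train_timesteps, (latents.shape[0],), device=latents.device
+ )
+ noisy_latents = noise_scheduler.add_noise(latents, noise, timesteps)        # 向潜空间表示加噪
+
+ encoder_hidden_states = text_encoder(batch["input_ids"])[0]                 # 文本条件
+ model_pred = unet(noisy_latents, timesteps, encoder_hidden_states).sample   # 预测噪声残差
+
+ loss = F.mse_loss(model_pred.float(), noise.float())
+ loss.backward()
+ optimizer.step()
+ optimizer.zero_grad()
+ ```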
153
+
154
+ ## 启动脚本
155
+
156
+ 完成所有配置后,即可启动训练脚本!🚀
157
+
158
+ <hfoptions id="training-inference">
159
+ <hfoption id="PyTorch">
160
+
161
+ 以 [火影忍者BLIP标注数据集](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions) 为例训练生成火影角色。设置环境变量 `MODEL_NAME` 和 `dataset_name` 指定模型和数据集(Hub或本地路径)。多GPU训练需在 `accelerate launch` 命令中添加 `--multi_gpu` 参数。
162
+
163
+ <Tip>
164
+
165
+ 使用本地数据集时,设置 `TRAIN_DIR` 和 `OUTPUT_DIR` 环境变量为数据集路径和模型保存路径。
166
+
167
+ </Tip>
168
+
169
+ ```bash
170
+ export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
171
+ export dataset_name="lambdalabs/naruto-blip-captions"
172
+
173
+ accelerate launch --mixed_precision="fp16" train_text_to_image.py \
174
+ --pretrained_model_name_or_path=$MODEL_NAME \
175
+ --dataset_name=$dataset_name \
176
+ --use_ema \
177
+ --resolution=512 --center_crop --random_flip \
178
+ --train_batch_size=1 \
179
+ --gradient_accumulation_steps=4 \
180
+ --gradient_checkpointing \
181
+ --max_train_steps=15000 \
182
+ --learning_rate=1e-05 \
183
+ --max_grad_norm=1 \
184
+ --enable_xformers_memory_efficient_attention \
185
+ --lr_scheduler="constant" --lr_warmup_steps=0 \
186
+ --output_dir="sd-naruto-model" \
187
+ --push_to_hub
188
+ ```
189
+
190
+ </hfoption>
191
+ <hfoption id="Flax">
192
+
193
+ Flax训练方案在TPU/GPU上效率更高(由 [@duongna211](https://github.com/duongna21) 实现),TPU性能更优但GPU表现同样出色。
194
+
195
+ 设置环境变量 `MODEL_NAME` 和 `dataset_name` 指定模型和数据集(Hub或本地路径)。
196
+
197
+ <Tip>
198
+
199
+ 使用本地数据集时,设置 `TRAIN_DIR` 和 `OUTPUT_DIR` 环境变量为数据集路径和模型保存路径。
200
+
201
+ </Tip>
202
+
203
+ ```bash
204
+ export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
205
+ export dataset_name="lambdalabs/naruto-blip-captions"
206
+
207
+ python train_text_to_image_flax.py \
208
+ --pretrained_model_name_or_path=$MODEL_NAME \
209
+ --dataset_name=$dataset_name \
210
+ --resolution=512 --center_crop --random_flip \
211
+ --train_batch_size=1 \
212
+ --max_train_steps=15000 \
213
+ --learning_rate=1e-05 \
214
+ --max_grad_norm=1 \
215
+ --output_dir="sd-naruto-model" \
216
+ --push_to_hub
217
+ ```
218
+
219
+ </hfoption>
220
+ </hfoptions>
221
+
222
+ 训练完成后,即可使用新模型进行推理:
223
+
224
+ <hfoptions id="training-inference">
225
+ <hfoption id="PyTorch">
226
+
227
+ ```py
228
+ from diffusers import StableDiffusionPipeline
229
+ import torch
230
+
231
+ pipeline = StableDiffusionPipeline.from_pretrained("path/to/saved_model", torch_dtype=torch.float16, use_safetensors=True).to("cuda")
232
+
233
+ image = pipeline(prompt="yoda").images[0]
234
+ image.save("yoda-naruto.png")
235
+ ```
236
+
237
+ </hfoption>
238
+ <hfoption id="Flax">
239
+
240
+ ```py
241
+ import jax
242
+ import numpy as np
243
+ from flax.jax_utils import replicate
244
+ from flax.training.common_utils import shard
245
+ from diffusers import FlaxStableDiffusionPipeline
246
+
247
+ pipeline, params = FlaxStableDiffusionPipeline.from_pretrained("path/to/saved_model", dtype=jax.numpy.bfloat16)
248
+
249
+ prompt = "yoda naruto"
250
+ prng_seed = jax.random.PRNGKey(0)
251
+ num_inference_steps = 50
252
+
253
+ num_samples = jax.device_count()
254
+ prompt = num_samples * [prompt]
255
+ prompt_ids = pipeline.prepare_inputs(prompt)
256
+
257
+ # 分片输入和随机数
258
+ params = replicate(params)
259
+ prng_seed = jax.random.split(prng_seed, jax.device_count())
260
+ prompt_ids = shard(prompt_ids)
261
+
262
+ images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
263
+ images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
264
+ images[0].save("yoda-naruto.png")
265
+ ```
266
+
267
+ </hfoption>
268
+ </hfoptions>
269
+
270
+ ## 后续步骤
271
+
272
+ 恭喜完成文生图模型训练!如需进一步使用模型,以下指南可能有所帮助:
273
+
274
+ - 了解如何加载 [LoRA权重](../using-diffusers/loading_adapters#LoRA) 进行推理(如果训练时使用了LoRA)
275
+ - 在 [文生图](../using-diffusers/conditional_image_generation) 任务指南中,了解引导尺度等参数或提示词加权等技术如何控制生成效果
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/text_inversion.md ADDED
@@ -0,0 +1,296 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--版权声明 2025 由 HuggingFace 团队所有。保留所有权利。
2
+
3
+ 根据 Apache 许可证 2.0 版("许可证")授权;除非符合许可证要求,否则不得使用本文件。
4
+ 您可以通过以下网址获取许可证副本:
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ 除非适用法律要求或书面同意,本软件按"原样"分发,不附带任何明示或暗示的担保或条件。详见许可证中规定的特定语言权限和限制。
9
+ -->
10
+
11
+ # 文本反转(Textual Inversion)
12
+
13
+ [文本反转](https://hf.co/papers/2208.01618)是一种训练技术,仅需少量示例图像即可个性化图像生成模型。该技术通过学习和更新文本嵌入(新嵌入会绑定到提示中必须使用的特殊词汇)来匹配您提供的示例图像。
14
+
15
+ 如果在显存有限的GPU上训练,建议在训练命令中启用`gradient_checkpointing`和`mixed_precision`参数。您还可以通过[xFormers](../optimization/xformers)使用内存高效注意力机制来减少内存占用。JAX/Flax训练也支持在TPU和GPU上进行高效训练,但不支持梯度检查点或xFormers。在配置与PyTorch相同的情况下,Flax训练脚本的速度至少应快70%!
16
+
17
+ 本指南将探索[textual_inversion.py](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py)脚本,帮助您更熟悉其工作原理,并了解如何根据自身需求进行调整。
18
+
19
+ 运行脚本前,请确保从源码安装库:
20
+
21
+ ```bash
22
+ git clone https://github.com/huggingface/diffusers
23
+ cd diffusers
24
+ pip install .
25
+ ```
26
+
27
+ 进入包含训练脚本的示例目录,并安装所需依赖:
28
+
29
+ <hfoptions id="installation">
30
+ <hfoption id="PyTorch">
31
+
32
+ ```bash
33
+ cd examples/textual_inversion
34
+ pip install -r requirements.txt
35
+ ```
36
+
37
+ </hfoption>
38
+ <hfoption id="Flax">
39
+
40
+ ```bash
41
+ cd examples/textual_inversion
42
+ pip install -r requirements_flax.txt
43
+ ```
44
+
45
+ </hfoption>
46
+ </hfoptions>
47
+
48
+ <Tip>
49
+
50
+ 🤗 Accelerate 是一个帮助您在多GPU/TPU或混合精度环境下训练的工具库。它会根据硬件和环境自动配置训练设置。查看🤗 Accelerate [快速入门](https://huggingface.co/docs/accelerate/quicktour)了解更多。
51
+
52
+ </Tip>
53
+
54
+ 初始化🤗 Accelerate环境:
55
+
56
+ ```bash
57
+ accelerate config
58
+ ```
59
+
60
+ 要设置默认的🤗 Accelerate环境(不选择任何配置):
61
+
62
+ ```bash
63
+ accelerate config default
64
+ ```
65
+
66
+ 如果您的环境不支持交互式shell(如notebook),可以使用:
67
+
68
+ ```py
69
+ from accelerate.utils import write_basic_config
70
+
71
+ write_basic_config()
72
+ ```
73
+
74
+ 最后,如果想在自定义数据集上训练模型,请参阅[创建训练数据集](create_dataset)指南,了解如何创建适用于训练脚本的数据集。
75
+
76
+ <Tip>
77
+
78
+ 以下部分重点介绍训练脚本中需要理解的关键修改点,但未涵盖脚本所有细节。如需深入了解,可随时查阅[脚本源码](https://github.com/huggingface/diffusers/blob/main/examples/textual_inversion/textual_inversion.py),如有疑问欢迎反馈。
79
+
80
+ </Tip>
81
+
82
+ ## 脚本参数
83
+
84
+ 训练脚本包含众多参数,便于您定制训练过程。所有参数及其说明都列在[`parse_args()`](https://github.com/huggingface/diffusers/blob/839c2a5ece0af4e75530cb520d77bc7ed8acf474/examples/textual_inversion/textual_inversion.py#L176)函数中。Diffusers为每个参数提供了默认值(如训练批次大小和学习率),但您可以通过训练命令自由调整这些值。
85
+
86
+ 例如,将梯度累积步数增加到默认值1以上:
87
+
88
+ ```bash
89
+ accelerate launch textual_inversion.py \
90
+ --gradient_accumulation_steps=4
91
+ ```
92
+
93
+ 其他需要指定的基础重要参数包括:
94
+
95
+ - `--pretrained_model_name_or_path`:Hub上的模型名称或本地预训练模型路径
96
+ - `--train_data_dir`:包含训练数据集(示例图像)的文件夹路径
97
+ - `--output_dir`:训练模型保存位置
98
+ - `--push_to_hub`:是否将训练好的模型推送至Hub
99
+ - `--checkpointing_steps`:训练过程中保存检查点的频率;若训练意外中断,可通过在命令中添加`--resume_from_checkpoint`从该检查点恢复训练
100
+ - `--num_vectors`:学习嵌入的向量数量;增加此参数可提升模型效果,但会提高训练成本
101
+ - `--placeholder_token`:绑定学习嵌入的特殊词汇(推理时需在提示中使用该词)
102
+ - `--initializer_token`:大致描述训练目标的单字词汇(如物体或风格)
103
+ - `--learnable_property`:训练目标是学习新"风格"(如梵高画风)还是"物体"(如您的宠物狗)
104
+
105
+ ## 训练脚本
106
+
107
+ 与其他训练脚本不同,textual_inversion.py包含自定义数据集类[`TextualInversionDataset`](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L487),用于创建数据集。您可以自定义图像尺寸、占位符词汇、插值方法、是否裁剪图像等。如需修改数据集创建方式,可调整`TextualInversionDataset`类。
108
+
109
+ 接下来,在[`main()`](https://github.com/huggingface/diffusers/blob/839c2a5ece0af4e75530cb520d77bc7ed8acf474/examples/textual_inversion/textual_inversion.py#L573)函数中可找到数据集预处理代码和训练循环。
110
+
111
+ 脚本首先加载[tokenizer](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L616)、[scheduler和模型](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L622):
112
+
113
+ ```py
114
+ # 加载tokenizer
115
+ if args.tokenizer_name:
116
+ tokenizer = CLIPTokenizer.from_pretrained(args.tokenizer_name)
117
+ elif args.pretrained_model_name_or_path:
118
+ tokenizer = CLIPTokenizer.from_pretrained(args.pretrained_model_name_or_path, subfolder="tokenizer")
119
+
120
+ # 加载scheduler和模型
121
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
122
+ text_encoder = CLIPTextModel.from_pretrained(
123
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
124
+ )
125
+ vae = AutoencoderKL.from_pretrained(args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision)
126
+ unet = UNet2DConditionModel.from_pretrained(
127
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision
128
+ )
129
+ ```
130
+
131
+ 随后将特殊[占位符词汇](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L632)加入tokenizer,并调整嵌入层以适配新词汇。
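+
+ 这一步的大致做法可用如下简化代码示意(非脚本原文;假设 `tokenizer`、`text_encoder` 已按上文加载,占位符与 initializer 词仅为示例):
+
+ ```py
+ tokenizer.add_tokens(["<cat-toy>"])                              # 加入占位符词汇
+ placeholder_id = tokenizer.convert_tokens_to_ids("<cat-toy>")
+ initializer_id = tokenizer.encode("toy", add_special_tokens=False)[0]
+
+ text_encoder.resize_token_embeddings(len(tokenizer))             # 扩展嵌入层以容纳新词
+ token_embeds = text_encoder.get_input_embeddings().weight.data
+ token_embeds[placeholder_id] = token_embeds[initializer_id]      # 用 initializer token 的嵌入初始化新词
+ ```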
132
+
133
+ 接着,脚本通过`TextualInversionDataset`[创建数据集](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L716):
134
+
135
+ ```py
136
+ train_dataset = TextualInversionDataset(
137
+ data_root=args.train_data_dir,
138
+ tokenizer=tokenizer,
139
+ size=args.resolution,
140
+ placeholder_token=(" ".join(tokenizer.convert_ids_to_tokens(placeholder_token_ids))),
141
+ repeats=args.repeats,
142
+ learnable_property=args.learnable_property,
143
+ center_crop=args.center_crop,
144
+ set="train",
145
+ )
146
+ train_dataloader = torch.utils.data.DataLoader(
147
+ train_dataset, batch_size=args.train_batch_size, shuffle=True, num_workers=args.dataloader_num_workers
148
+ )
149
+ ```
150
+
151
+ 最后,[训练循环](https://github.com/huggingface/diffusers/blob/b81c69e489aad3a0ba73798c459a33990dc4379c/examples/textual_inversion/textual_inversion.py#L784)处理从预测噪声残差到更新特殊占位符词汇嵌入权重的所有流程。
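+
+ 其中的关键细节是:虽然整张嵌入表都会产生梯度,但每步更新后,除占位符以外的词嵌入都会被恢复为原值,等效于只学习新词的嵌入。简化示意如下(非脚本原文;假设 `orig_embeds_params` 为训练前保存的嵌入副本,`placeholder_token_ids` 为占位符词的 id):
+
+ ```py
+ import torch
+
+ index_no_updates = torch.ones((len(tokenizer),), dtype=torch.bool)
+ index_no_updates[min(placeholder_token_ids) : max(placeholder_token_ids) + 1] = False
+
+ with torch.no_grad():
+     text_encoder.get_input_embeddings().weight[index_no_updates] = orig_embeds_params[index_no_updates]
+ ```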
152
+
153
+ 如需深入了解训练循环工作原理,请参阅[理解管道、模型与调度器](../using-diffusers/write_own_pipeline)教程,该教程解析了去噪过程的基本模式。
154
+
155
+ ## 启动脚本
156
+
157
+ 完成所有修改或确认默认配置后,即可启动训练脚本!🚀
158
+
159
+ 本指南将下载[猫玩具](https://huggingface.co/datasets/diffusers/cat_toy_example)的示例图像并存储在目录中。当然,您也可以创建和使用自己的数据集(参见[创建训练数据集](create_dataset)指南)。
160
+
161
+ ```py
162
+ from huggingface_hub import snapshot_download
163
+
164
+ local_dir = "./cat"
165
+ snapshot_download(
166
+ "diffusers/cat_toy_example", local_dir=local_dir, repo_type="dataset", ignore_patterns=".gitattributes"
167
+ )
168
+ ```
169
+
170
+ 设置环境变量`MODEL_NAME`为Hub上的模型ID或本地模型路径,`DATA_DIR`为刚下载的猫图像路径。脚本会将以下文件保存至您的仓库:
171
+
172
+ - `learned_embeds.bin`:与示例图像对应的学习嵌入向量
173
+ - `token_identifier.txt`:特殊占位符词汇
174
+ - `type_of_concept.txt`:训练概念类型("object"或"style")
175
+
176
+ <Tip warning={true}>
177
+
178
+ 在单块V100 GPU上完整训练约需1小时。
179
+
180
+ </Tip>
181
+
182
+ 启动脚本前还有最后一步。如果想实时观察训练过程,可以定期保存生成图像。在训练命令中添加以下参数:
183
+
184
+ ```bash
185
+ --validation_prompt="A <cat-toy> train"
186
+ --num_validation_images=4
187
+ --validation_steps=100
188
+ ```
189
+
190
+ <hfoptions id="training-inference">
191
+ <hfoption id="PyTorch">
192
+
193
+ ```bash
194
+ export MODEL_NAME="stable-diffusion-v1-5/stable-diffusion-v1-5"
195
+ export DATA_DIR="./cat"
196
+
197
+ accelerate launch textual_inversion.py \
198
+ --pretrained_model_name_or_path=$MODEL_NAME \
199
+ --train_data_dir=$DATA_DIR \
200
+ --learnable_property="object" \
201
+ --placeholder_token="<cat-toy>" \
202
+ --initializer_token="toy" \
203
+ --resolution=512 \
204
+ --train_batch_size=1 \
205
+ --gradient_accumulation_steps=4 \
206
+ --max_train_steps=3000 \
207
+ --learning_rate=5.0e-04 \
208
+ --scale_lr \
209
+ --lr_scheduler="constant" \
210
+ --lr_warmup_steps=0 \
211
+ --output_dir="textual_inversion_cat" \
212
+ --push_to_hub
213
+ ```
214
+
215
+ </hfoption>
216
+ <hfoption id="Flax">
217
+
218
+ ```bash
219
+ export MODEL_NAME="duongna/stable-diffusion-v1-4-flax"
220
+ export DATA_DIR="./cat"
221
+
222
+ python textual_inversion_flax.py \
223
+ --pretrained_model_name_or_path=$MODEL_NAME \
224
+ --train_data_dir=$DATA_DIR \
225
+ --learnable_property="object" \
226
+ --placeholder_token="<cat-toy>" \
227
+ --initializer_token="toy" \
228
+ --resolution=512 \
229
+ --train_batch_size=1 \
230
+ --max_train_steps=3000 \
231
+ --learning_rate=5.0e-04 \
232
+ --scale_lr \
233
+ --output_dir="textual_inversion_cat" \
234
+ --push_to_hub
235
+ ```
236
+
237
+ </hfoption>
238
+ </hfoptions>
239
+
240
+ 训练完成后,可以像这样使用新模型进行推理:
241
+
242
+ <hfoptions id="training-inference">
243
+ <hfoption id="PyTorch">
244
+
245
+ ```py
246
+ from diffusers import StableDiffusionPipeline
247
+ import torch
248
+
249
+ pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16).to("cuda")
250
+ pipeline.load_textual_inversion("sd-concepts-library/cat-toy")
251
+ image = pipeline("A <cat-toy> train", num_inference_steps=50).images[0]
252
+ image.save("cat-train.png")
253
+ ```
254
+
255
+ </hfoption>
256
+ <hfoption id="Flax">
257
+
258
+ Flax不支持[`~loaders.TextualInversionLoaderMixin.load_textual_inversion`]方法,但textual_inversion_flax.py脚本会在训练后[保存](https://github.com/huggingface/diffusers/blob/c0f058265161178f2a88849e92b37ffdc81f1dcc/examples/textual_inversion/textual_inversion_flax.py#L636C2-L636C2)学习到的嵌入作为模型的一部分。这意味着您可以像使用其他Flax模型一样进行推理:
259
+
260
+ ```py
261
+ import jax
262
+ import numpy as np
263
+ from flax.jax_utils import replicate
264
+ from flax.training.common_utils import shard
265
+ from diffusers import FlaxStableDiffusionPipeline
266
+
267
+ model_path = "path-to-your-trained-model"
268
+ pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(model_path, dtype=jax.numpy.bfloat16)
269
+
270
+ prompt = "A <cat-toy> train"
271
+ prng_seed = jax.random.PRNGKey(0)
272
+ num_inference_steps = 50
273
+
274
+ num_samples = jax.device_count()
275
+ prompt = num_samples * [prompt]
276
+ prompt_ids = pipeline.prepare_inputs(prompt)
277
+
278
+ # 分片输入和随机数生成器
279
+ params = replicate(params)
280
+ prng_seed = jax.random.split(prng_seed, jax.device_count())
281
+ prompt_ids = shard(prompt_ids)
282
+
283
+ images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
284
+ images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
285
+ images[0].save("cat-train.png")
286
+ ```
287
+
288
+ </hfoption>
289
+ </hfoptions>
290
+
291
+ ## 后续步骤
292
+
293
+ 恭喜您成功训练了自己的文本反转模型!🎉 如需了解更多使用技巧,以下指南可能会有所帮助:
294
+
295
+ - 学习如何[加载文本反转嵌入](../using-diffusers/loading_adapters),并将其用作负面嵌入
296
+ - 学习如何将[文本反转](textual_inversion_inference)应用于Stable Diffusion 1/2和Stable Diffusion XL的推理
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/training/wuerstchen.md ADDED
@@ -0,0 +1,191 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
+ the License. You may obtain a copy of the License at
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
+ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
+ specific language governing permissions and limitations under the License.
11
+ -->
12
+
13
+ # Wuerstchen
14
+
15
+ [Wuerstchen](https://hf.co/papers/2306.00637) 模型通过将潜在空间压缩 42 倍,在不影响图像质量的情况下大幅降低计算成本并加速推理。在训练过程中,Wuerstchen 使用两个模型(VQGAN + 自动编码器)来压缩潜在表示,然后第三个模型(文本条件潜在扩散模型)在这个高度压缩的空间上进行条件化以生成图像。
16
+
17
+ 为了将先验模型放入 GPU 内存并加速训练,尝试分别启用 `gradient_accumulation_steps`、`gradient_checkpointing` 和 `mixed_precision`。
18
+
19
+ 本指南探讨 [train_text_to_image_prior.py](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_prior.py) 脚本,帮助您更熟悉它,以及如何根据您的用例进行适配。
20
+
21
+ 在运行脚本之前,请确保从源代码安装库:
22
+
23
+ ```bash
24
+ git clone https://github.com/huggingface/diffusers
25
+ cd diffusers
26
+ pip install .
27
+ ```
28
+
29
+ 然后导航到包含训练脚本的示例文件夹,并安装脚本所需的依赖项:
30
+
31
+ ```bash
32
+ cd examples/wuerstchen/text_to_image
33
+ pip install -r requirements.txt
34
+ ```
35
+
36
+ <Tip>
37
+
38
+ 🤗 Accelerate 是一个帮助您在多个 GPU/TPU 上或使用混合精度进行训练的库。它会根据您的硬件和环境自动配置训练设置。查看 🤗 Accelerate [快速入门](https://huggingface.co/docs/accelerate/quicktour) 以了解更多信息。
39
+
40
+ </Tip>
41
+
42
+ 初始化一个 🤗 Accelerate 环境:
43
+
44
+ ```bash
45
+ accelerate config
46
+ ```
47
+
48
+ 要设置一个默认的 🤗 Accelerate 环境而不选择任何配置:
49
+
50
+ ```bash
51
+ accelerate config default
52
+ ```
53
+
54
+ 或者,如果您的环境不支持交互式 shell,例如笔记本,您可以使用:
55
+
56
+ ```py
57
+ from accelerate.utils import write_basic_config
58
+
59
+ write_basic_config()
60
+ ```
61
+
62
+ 最后,如果您想在自己的数据集上训练模型,请查看 [创建训练数据集](create_dataset) 指南,了解如何创建与训练脚本兼容的数据集。
63
+
64
+ <Tip>
65
+
66
+ 以下部分重点介绍了训练脚本中对于理解如何修改它很重要的部分,但并未涵盖 [脚本](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/train_text_to_image_prior.py) 的详细信息。如果您有兴趣了解更多,请随时阅读脚本,并告诉我们您是否有任何问题或疑虑。
67
+
68
+ </Tip>
69
+
70
+ ## 脚本参数
71
+
72
+ 训练脚本提供了许多参数来帮助您自定义训练运行。所有参数及其描述都可以在 [`parse_args()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L192) 函数中找到。它为每个参数提供了默认值,例如训练批次大小和学习率,但如果您愿意,也可以在训练命令中设置自己的值。
73
+
74
+ 例如,要使用 fp16 格式的混合精度加速训练,请在训练命令中添加 `--mixed_precision` 参数:
75
+
76
+ ```bash
77
+ accelerate launch train_text_to_image_prior.py \
78
+ --mixed_precision="fp16"
79
+ ```
80
+
81
+ 大多数参数与 [文本到图像](text2image#script-parameters) 训练指南中的参数相同,因此让我们直接深入 Wuerstchen 训练脚本!
82
+
83
+ ## 训练脚本
84
+
85
+ 训练脚本也与 [文本到图像](text2image#training-script) 训练指南类似,但已修改以支持 Wuerstchen。本指南重点介绍 Wuerstchen 训练脚本中独特的代码。
86
+
87
+ [`main()`](https://github.com/huggingface/diffusers/blob/6e68c71503682c8693cb5b06a4da4911dfd655ee/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L441) 函数首先初始化图像编码器 - 一个 [EfficientNet](https://github.com/huggingface/diffusers/blob/main/examples/wuerstchen/text_to_image/modeling_efficient_net_encoder.py) - 以及通常的调度器和分词器。
88
+
89
+ ```py
90
+ with ContextManagers(deepspeed_zero_init_disabled_context_manager()):
91
+ pretrained_checkpoint_file = hf_hub_download("dome272/wuerstchen", filename="model_v2_stage_b.pt")
92
+ state_dict = torch.load(pretrained_checkpoint_file, map_location="cpu")
93
+ image_encoder = EfficientNetEncoder()
94
+ image_encoder.load_state_dict(state_dict["effnet_state_dict"])
95
+ image_encoder.eval()
96
+ ```
97
+
98
+ 您还将加载 [`WuerstchenPrior`] 模型以进行优化。
99
+
100
+ ```py
101
+ prior = WuerstchenPrior.from_pretrained(args.pretrained_prior_model_name_or_path, subfolder="prior")
102
+
103
+ optimizer = optimizer_cls(
104
+ prior.parameters(),
105
+ lr=args.learning_rate,
106
+ betas=(args.adam_beta1, args.adam_beta2),
107
+ weight_decay=args.adam_weight_decay,
108
+ eps=args.adam_epsilon,
109
+ )
110
+ ```
111
+
112
+ 接下来,您将对图像应用一些 [transforms](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L656) 并对标题进行 [tokenize](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L637):
113
+
114
+ ```py
115
+ def preprocess_train(examples):
116
+ images = [image.convert("RGB") for image in examples[image_column]]
118
+ examples["effnet_pixel_values"] = [effnet_transforms(image) for image in images]
119
+ examples["text_input_ids"], examples["text_mask"] = tokenize_captions(examples)
120
+ return examples
121
+ ```
122
+
123
+ 最后,[训练循环](https://github.com/huggingface/diffusers/blob/65ef7a0c5c594b4f84092e328fbdd73183613b30/examples/wuerstchen/text_to_image/train_text_to_image_prior.py#L656)处理使用`EfficientNetEncoder`将图像压缩到潜在空间,向潜在表示添加噪声,并使用[`WuerstchenPrior`]模型预测噪声残差。
124
+
125
+ ```py
126
+ pred_noise = prior(noisy_latents, timesteps, prompt_embeds)
127
+ ```
128
+
129
+ 如果您想了解更多关于训练循环的工作原理,请查看[理解管道、模型和调度器](../using-diffusers/write_own_pipeline)教程,该教程分解了去噪过程的基本模式。
130
+
131
+ ## 启动脚本
132
+
133
+ 一旦您完成了所有更改或对默认配置满意,就可以启动训练脚本了!🚀
134
+
135
+ 设置`DATASET_NAME`环境变量为Hub中的数据集名称。本指南使用[Naruto BLIP captions](https://huggingface.co/datasets/lambdalabs/naruto-blip-captions)数据集,但您也可以创建和训练自己的数据集(参见[创建用于训练的数据集](create_dataset)指南)。
136
+
137
+ <Tip>
138
+
139
+ 要使用Weights & Biases监控训练进度,请在训练命令中添加`--report_to=wandb`参数。您还需要在训练命令中添加`--validation_prompt`以跟踪结果。这对于调试模型和查看中间结果非常有用。
140
+
141
+ </Tip>
142
+
143
+ ```bash
144
+ export DATASET_NAME="lambdalabs/naruto-blip-captions"
145
+
146
+ accelerate launch train_text_to_image_prior.py \
147
+ --mixed_precision="fp16" \
148
+ --dataset_name=$DATASET_NAME \
149
+ --resolution=768 \
150
+ --train_batch_size=4 \
151
+ --gradient_accumulation_steps=4 \
152
+ --gradient_checkpointing \
153
+ --dataloader_num_workers=4 \
154
+ --max_train_steps=15000 \
155
+ --learning_rate=1e-05 \
156
+ --max_grad_norm=1 \
157
+ --checkpoints_total_limit=3 \
158
+ --lr_scheduler="constant" \
159
+ --lr_warmup_steps=0 \
160
+ --validation_prompts="A robot naruto, 4k photo" \
161
+ --report_to="wandb" \
162
+ --push_to_hub \
163
+ --output_dir="wuerstchen-prior-naruto-model"
164
+ ```
165
+
166
+ 训练完成后,您可以使用新训练的模型进行推理!
167
+
168
+ ```py
169
+ import torch
170
+ from diffusers import AutoPipelineForText2Image
171
+ from diffusers.pipelines.wuerstchen import DEFAULT_STAGE_C_TIMESTEPS
172
+
173
+ pipeline = AutoPipelineForText2Image.from_pretrained("path/to/saved/model", torch_dtype=torch.float16).to("cuda")
174
+
175
+ caption = "A cute bird naruto holding a shield"
176
+ images = pipeline(
177
+ caption,
178
+ width=1024,
179
+ height=1536,
180
+ prior_timesteps=DEFAULT_STAGE_C_TIMESTEPS,
181
+ prior_guidance_scale=4.0,
182
+ num_images_per_prompt=2,
183
+ ).images
184
+ ```
185
+
186
+ ## 下一步
187
+
188
+ 恭喜您训练了一个Wuerstchen模型!要了解更多关于如何使用您的新模型的信息,以下内容可能有所帮助:
190
+
191
+ - 查看 [Wuerstchen](../api/pipelines/wuerstchen#text-to-image-generation) API 文档,了解更多关于如何使用该管道进行文本到图像生成及其限制的信息。
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/using-diffusers/consisid.md ADDED
@@ -0,0 +1,100 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with
4
+ the License. You may obtain a copy of the License at
5
+
6
+ http://www.apache.org/licenses/LICENSE-2.0
7
+
8
+ Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
9
+ an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the
10
+ specific language governing permissions and limitations under the License.
11
+ -->
12
+ # ConsisID
13
+
14
+ [ConsisID](https://github.com/PKU-YuanGroup/ConsisID)是一种身份保持的文本到视频生成模型,其通过频率分解在生成的视频中保持面部一致性。它具有以下特点:
15
+
16
+ - 基于频率分解:将人物ID特征解耦为高频和低频部分,从频域的角度分析DIT架构的特性,并且基于此特性设计合理的控制信息注入方式。
17
+
18
+ - 一致性训练策略:我们提出粗到细训练策略、动态掩码损失、动态跨脸损失,进一步提高了模型的泛化能力和身份保持效果。
19
+
20
+
21
+ - 推理无需微调:之前的方法在推理前,需要对输入id进行case-by-case微调,时间和算力开销较大,而我们的方法是tuning-free的。
22
+
23
+
24
+ 本指南将指导您使用 ConsisID 生成身份保持的视频。
25
+
26
+ ## Load Model Checkpoints
27
+ 模型权重可以存储在Hub上或本地的单独子文件夹中,在这种情况下,您应该使用 [`~DiffusionPipeline.from_pretrained`] 方法。
28
+
29
+
30
+ ```python
31
+ # !pip install consisid_eva_clip insightface facexlib
32
+ import torch
33
+ from diffusers import ConsisIDPipeline
34
+ from diffusers.pipelines.consisid.consisid_utils import prepare_face_models, process_face_embeddings_infer
35
+ from huggingface_hub import snapshot_download
36
+
37
+ # Download ckpts
38
+ snapshot_download(repo_id="BestWishYsh/ConsisID-preview", local_dir="BestWishYsh/ConsisID-preview")
39
+
40
+ # Load face helper model to preprocess input face image
41
+ face_helper_1, face_helper_2, face_clip_model, face_main_model, eva_transform_mean, eva_transform_std = prepare_face_models("BestWishYsh/ConsisID-preview", device="cuda", dtype=torch.bfloat16)
42
+
43
+ # Load consisid base model
44
+ pipe = ConsisIDPipeline.from_pretrained("BestWishYsh/ConsisID-preview", torch_dtype=torch.bfloat16)
45
+ pipe.to("cuda")
46
+ ```
47
+
48
+ ## Identity-Preserving Text-to-Video
49
+ 对于身份保持的文本到视频生成,需要输入文本提示和包含清晰面部(例如,最好是半身或全身)的图像。默认情况下,ConsisID 会生成 720x480 的视频以获得最佳效果。
50
+
51
+ ```python
52
+ from diffusers.utils import export_to_video
53
+
54
+ prompt = "The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel."
55
+ image = "https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_input.png?download=true"
56
+
57
+ id_cond, id_vit_hidden, image, face_kps = process_face_embeddings_infer(face_helper_1, face_clip_model, face_helper_2, eva_transform_mean, eva_transform_std, face_main_model, "cuda", torch.bfloat16, image, is_align_face=True)
58
+
59
+ video = pipe(image=image, prompt=prompt, num_inference_steps=50, guidance_scale=6.0, use_dynamic_cfg=False, id_vit_hidden=id_vit_hidden, id_cond=id_cond, kps_cond=face_kps, generator=torch.Generator("cuda").manual_seed(42))
60
+ export_to_video(video.frames[0], "output.mp4", fps=8)
61
+ ```
62
+ <table>
63
+ <tr>
64
+ <th style="text-align: center;">Face Image</th>
65
+ <th style="text-align: center;">Video</th>
66
+ <th style="text-align: center;">Description</th
67
+ </tr>
68
+ <tr>
69
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_0.png?download=true" style="height: auto; width: 600px;"></td>
70
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_0.gif?download=true" style="height: auto; width: 2000px;"></td>
71
+ <td>The video, in a beautifully crafted animated style, features a confident woman riding a horse through a lush forest clearing. Her expression is focused yet serene as she adjusts her wide-brimmed hat with a practiced hand. She wears a flowy bohemian dress, which moves gracefully with the rhythm of the horse, the fabric flowing fluidly in the animated motion. The dappled sunlight filters through the trees, casting soft, painterly patterns on the forest floor. Her posture is poised, showing both control and elegance as she guides the horse with ease. The animation's gentle, fluid style adds a dreamlike quality to the scene, with the woman’s calm demeanor and the peaceful surroundings evoking a sense of freedom and harmony.</td>
72
+ </tr>
73
+ <tr>
74
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_1.png?download=true" style="height: auto; width: 600px;"></td>
75
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_1.gif?download=true" style="height: auto; width: 2000px;"></td>
76
+ <td>The video, in a captivating animated style, shows a woman standing in the center of a snowy forest, her eyes narrowed in concentration as she extends her hand forward. She is dressed in a deep blue cloak, her breath visible in the cold air, which is rendered with soft, ethereal strokes. A faint smile plays on her lips as she summons a wisp of ice magic, watching with focus as the surrounding trees and ground begin to shimmer and freeze, covered in delicate ice crystals. The animation’s fluid motion brings the magic to life, with the frost spreading outward in intricate, sparkling patterns. The environment is painted with soft, watercolor-like hues, enhancing the magical, dreamlike atmosphere. The overall mood is serene yet powerful, with the quiet winter air amplifying the delicate beauty of the frozen scene.</td>
77
+ </tr>
78
+ <tr>
79
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_2.png?download=true" style="height: auto; width: 600px;"></td>
80
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_2.gif?download=true" style="height: auto; width: 2000px;"></td>
81
+ <td>The animation features a whimsical portrait of a balloon seller standing in a gentle breeze, captured with soft, hazy brushstrokes that evoke the feel of a serene spring day. His face is framed by a gentle smile, his eyes squinting slightly against the sun, while a few wisps of hair flutter in the wind. He is dressed in a light, pastel-colored shirt, and the balloons around him sway with the wind, adding a sense of playfulness to the scene. The background blurs softly, with hints of a vibrant market or park, enhancing the light-hearted, yet tender mood of the moment.</td>
82
+ </tr>
83
+ <tr>
84
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_3.png?download=true" style="height: auto; width: 600px;"></td>
85
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_3.gif?download=true" style="height: auto; width: 2000px;"></td>
86
+ <td>The video captures a boy walking along a city street, filmed in black and white on a classic 35mm camera. His expression is thoughtful, his brow slightly furrowed as if he's lost in contemplation. The film grain adds a textured, timeless quality to the image, evoking a sense of nostalgia. Around him, the cityscape is filled with vintage buildings, cobblestone sidewalks, and softly blurred figures passing by, their outlines faint and indistinct. Streetlights cast a gentle glow, while shadows play across the boy's path, adding depth to the scene. The lighting highlights the boy's subtle smile, hinting at a fleeting moment of curiosity. The overall cinematic atmosphere, complete with classic film still aesthetics and dramatic contrasts, gives the scene an evocative and introspective feel.</td>
87
+ </tr>
88
+ <tr>
89
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_image_4.png?download=true" style="height: auto; width: 600px;"></td>
90
+ <td><img src="https://huggingface.co/datasets/huggingface/documentation-images/resolve/main/diffusers/consisid/consisid_output_4.gif?download=true" style="height: auto; width: 2000px;"></td>
91
+ <td>The video features a baby wearing a bright superhero cape, standing confidently with arms raised in a powerful pose. The baby has a determined look on their face, with eyes wide and lips pursed in concentration, as if ready to take on a challenge. The setting appears playful, with colorful toys scattered around and a soft rug underfoot, while sunlight streams through a nearby window, highlighting the fluttering cape and adding to the impression of heroism. The overall atmosphere is lighthearted and fun, with the baby's expressions capturing a mix of innocence and an adorable attempt at bravery, as if truly ready to save the day.</td>
92
+ </tr>
93
+ </table>
94
+
95
+ ## Resources
96
+
97
+ 通过以下资源了解有关 ConsisID 的更多信息:
98
+
99
+ - 一段 [视频](https://www.youtube.com/watch?v=PhlgC-bI5SQ) 演示了 ConsisID 的主要功能;
100
+ - 有关更多详细信息,请参阅研究论文 [Identity-Preserving Text-to-Video Generation by Frequency Decomposition](https://hf.co/papers/2411.17440)。
exp_code/1_benchmark/diffusers-WanS2V/docs/source/zh/using-diffusers/schedulers.md ADDED
@@ -0,0 +1,256 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ <!--Copyright 2025 The HuggingFace Team. All rights reserved.
2
+
3
+ 根据 Apache License 2.0 许可证(以下简称"许可证")授权;
4
+ 除非符合许可证要求,否则不得使用本文件。
5
+ 您可以通过以下链接获取许可证副本:
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ 除非适用法律要求或书面同意,本软件按"原样"分发,
10
+ 无任何明示或暗示的担保或条件。详见许可证中关于权限和限制的具体规定。
11
+ -->
12
+
13
+ # 加载调度器与模型
14
+
15
+ [[open-in-colab]]
16
+
17
+ Diffusion管道是由可互换的调度器(schedulers)和模型(models)组成的集合,可通过混合搭配来定制特定用例的流程。调度器封装了整个去噪过程(如去噪步数和寻找去噪样本的算法),其本身不包含可训练参数,因此内存占用极低。模型则主要负责从含噪输入到较纯净样本的前向传播过程。
18
+
19
+ 本指南将展示如何加载调度器和模型来自定义流程。我们将全程使用[stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5)检查点,首先加载基础管道:
20
+
21
+ ```python
22
+ import torch
23
+ from diffusers import DiffusionPipeline
24
+
25
+ pipeline = DiffusionPipeline.from_pretrained(
26
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
27
+ ).to("cuda")
28
+ ```
29
+
30
+ 通过`pipeline.scheduler`属性可查看当前管道使用的调度器:
31
+
32
+ ```python
33
+ pipeline.scheduler
34
+ PNDMScheduler {
35
+ "_class_name": "PNDMScheduler",
36
+ "_diffusers_version": "0.21.4",
37
+ "beta_end": 0.012,
38
+ "beta_schedule": "scaled_linear",
39
+ "beta_start": 0.00085,
40
+ "clip_sample": false,
41
+ "num_train_timesteps": 1000,
42
+ "set_alpha_to_one": false,
43
+ "skip_prk_steps": true,
44
+ "steps_offset": 1,
45
+ "timestep_spacing": "leading",
46
+ "trained_betas": null
47
+ }
48
+ ```
49
+
50
+ ## 加载调度器
51
+
52
+ 调度器通过配置文件定义,同一配置文件可被多种调度器共享。使用[`SchedulerMixin.from_pretrained`]方法加载时,需指定`subfolder`参数以定位配置文件在仓库中的正确子目录。
53
+
54
+ 例如加载[`DDIMScheduler`]:
55
+
56
+ ```python
57
+ from diffusers import DDIMScheduler, DiffusionPipeline
58
+
59
+ ddim = DDIMScheduler.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="scheduler")
60
+ ```
61
+
62
+ 然后将新调度器传入管道:
63
+
64
+ ```python
65
+ pipeline = DiffusionPipeline.from_pretrained(
66
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", scheduler=ddim, torch_dtype=torch.float16, use_safetensors=True
67
+ ).to("cuda")
68
+ ```
69
+
70
+ ## 调度器对比
71
+
72
+ 不同调度器各有优劣,难以定量评估哪个最适合您的流程。通常需要在去噪速度与质量之间权衡。我们建议尝试多种调度器以找到最佳方案。通过`pipeline.scheduler.compatibles`属性可查看兼容当前管道的所有调度器。
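+
+ 例如,可以直接打印这些兼容的调度器类(输出取决于所用管道与 diffusers 版本):
+
+ ```python
+ for scheduler_cls in pipeline.scheduler.compatibles:
+     print(scheduler_cls.__name__)
+ ```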
73
+
74
+ 下面我们使用相同提示词和随机种子,对比[`LMSDiscreteScheduler`]、[`EulerDiscreteScheduler`]、[`EulerAncestralDiscreteScheduler`]和[`DPMSolverMultistepScheduler`]的表现:
75
+
76
+ ```python
77
+ import torch
78
+ from diffusers import DiffusionPipeline
79
+
80
+ pipeline = DiffusionPipeline.from_pretrained(
81
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True
82
+ ).to("cuda")
83
+
84
+ prompt = "A photograph of an astronaut riding a horse on Mars, high resolution, high definition."
85
+ generator = torch.Generator(device="cuda").manual_seed(8)
86
+ ```
87
+
88
+ 使用[`~ConfigMixin.from_config`]方法加载不同调度器的配置来切换管道调度器:
89
+
90
+ <hfoptions id="schedulers">
91
+ <hfoption id="LMSDiscreteScheduler">
92
+
93
+ [`LMSDiscreteScheduler`]通常能生成比默认调度器更高质量的图像。
94
+
95
+ ```python
96
+ from diffusers import LMSDiscreteScheduler
97
+
98
+ pipeline.scheduler = LMSDiscreteScheduler.from_config(pipeline.scheduler.config)
99
+ image = pipeline(prompt, generator=generator).images[0]
100
+ image
101
+ ```
102
+
103
+ </hfoption>
104
+ <hfoption id="EulerDiscreteScheduler">
105
+
106
+ [`EulerDiscreteScheduler`]仅需30步即可生成高质量图像。
107
+
108
+ ```python
109
+ from diffusers import EulerDiscreteScheduler
110
+
111
+ pipeline.scheduler = EulerDiscreteScheduler.from_config(pipeline.scheduler.config)
112
+ image = pipeline(prompt, generator=generator).images[0]
113
+ image
114
+ ```
115
+
116
+ </hfoption>
117
+ <hfoption id="EulerAncestralDiscreteScheduler">
118
+
119
+ [`EulerAncestralDiscreteScheduler`]同样可在30步内生成高质量图像。
120
+
121
+ ```python
122
+ from diffusers import EulerAncestralDiscreteScheduler
123
+
124
+ pipeline.scheduler = EulerAncestralDiscreteScheduler.from_config(pipeline.scheduler.config)
125
+ image = pipeline(prompt, generator=generator).images[0]
126
+ image
127
+ ```
128
+
129
+ </hfoption>
130
+ <hfoption id="DPMSolverMultistepScheduler">
131
+
132
+ [`DPMSolverMultistepScheduler`]在速度与质量间取得平衡,仅需20步即可生成优质图像。
133
+
134
+ ```python
135
+ from diffusers import DPMSolverMultistepScheduler
136
+
137
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
138
+ image = pipeline(prompt, generator=generator).images[0]
139
+ image
140
+ ```
141
+
142
+ </hfoption>
143
+ </hfoptions>
144
+
145
+ <div class="flex gap-4">
146
+ <div>
147
+ <img class="rounded-xl" src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_lms.png" />
148
+ <figcaption class="mt-2 text-center text-sm text-gray-500">LMSDiscreteScheduler</figcaption>
149
+ </div>
150
+ <div>
151
+ <img class="rounded-xl" src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_discrete.png" />
152
+ <figcaption class="mt-2 text-center text-sm text-gray-500">EulerDiscreteScheduler</figcaption>
153
+ </div>
154
+ </div>
155
+ <div class="flex gap-4">
156
+ <div>
157
+ <img class="rounded-xl" src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_euler_ancestral.png" />
158
+ <figcaption class="mt-2 text-center text-sm text-gray-500">EulerAncestralDiscreteScheduler</figcaption>
159
+ </div>
160
+ <div>
161
+ <img class="rounded-xl" src="https://huggingface.co/datasets/patrickvonplaten/images/resolve/main/diffusers_docs/astronaut_dpm.png" />
162
+ <figcaption class="mt-2 text-center text-sm text-gray-500">DPMSolverMultistepScheduler</figcaption>
163
+ </div>
164
+ </div>
165
+
166
+ 多数生成图像质量相近,实际选择需根据具体场景测试多种调度器进行比较。
167
+
168
+ ### Flax调度器
169
+
170
+ 对比Flax调度器时,需额外将调度器状态加载到模型参数中。例如将[`FlaxStableDiffusionPipeline`]的默认调度器切换为超高效的[`FlaxDPMSolverMultistepScheduler`]:
171
+
172
+ > [!WARNING]
173
+ > [`FlaxLMSDiscreteScheduler`]和[`FlaxDDPMScheduler`]目前暂不兼容[`FlaxStableDiffusionPipeline`]。
174
+
175
+ ```python
176
+ import jax
177
+ import numpy as np
178
+ from flax.jax_utils import replicate
179
+ from flax.training.common_utils import shard
180
+ from diffusers import FlaxStableDiffusionPipeline, FlaxDPMSolverMultistepScheduler
181
+
182
+ scheduler, scheduler_state = FlaxDPMSolverMultistepScheduler.from_pretrained(
183
+ "stable-diffusion-v1-5/stable-diffusion-v1-5",
184
+ subfolder="scheduler"
185
+ )
186
+ pipeline, params = FlaxStableDiffusionPipeline.from_pretrained(
187
+ "stable-diffusion-v1-5/stable-diffusion-v1-5",
188
+ scheduler=scheduler,
189
+ variant="bf16",
190
+ dtype=jax.numpy.bfloat16,
191
+ )
192
+ params["scheduler"] = scheduler_state
193
+ ```
194
+
195
+ 利用Flax对TPU的兼容性实现并行图像生成。需为每个设备复制模型参数,并分配输入数据:
196
+
197
+ ```python
198
+ # 每个并行设备生成1张图像(TPUv2-8/TPUv3-8支持8设备并行)
199
+ prompt = "一张宇航员在火星上骑马的高清照片,高分辨率,高画质。"
200
+ num_samples = jax.device_count()
201
+ prompt_ids = pipeline.prepare_inputs([prompt] * num_samples)
202
+
203
+ prng_seed = jax.random.PRNGKey(0)
204
+ num_inference_steps = 25
205
+
206
+ # 分配输入和随机种子
207
+ params = replicate(params)
208
+ prng_seed = jax.random.split(prng_seed, jax.device_count())
209
+ prompt_ids = shard(prompt_ids)
210
+
211
+ images = pipeline(prompt_ids, params, prng_seed, num_inference_steps, jit=True).images
212
+ images = pipeline.numpy_to_pil(np.asarray(images.reshape((num_samples,) + images.shape[-3:])))
213
+ ```
214
+
215
+ ## 模型加载
216
+
217
+ 通过[`ModelMixin.from_pretrained`]方法加载模型,该方法会下载并缓存模型权重和配置的最新版本。若本地缓存已存在最新文件,则直接复用缓存而非重复下载。
218
+
219
+ 通过`subfolder`参数可从子目录加载模型。例如[stable-diffusion-v1-5/stable-diffusion-v1-5](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5)的模型权重存储在[unet](https://hf.co/stable-diffusion-v1-5/stable-diffusion-v1-5/tree/main/unet)子目录中:
220
+
221
+ ```python
222
+ from diffusers import UNet2DConditionModel
223
+
224
+ unet = UNet2DConditionModel.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet", use_safetensors=True)
225
+ ```
226
+
227
+ 也可直接从[仓库](https://huggingface.co/google/ddpm-cifar10-32/tree/main)加载:
228
+
229
+ ```python
230
+ from diffusers import UNet2DModel
231
+
232
+ unet = UNet2DModel.from_pretrained("google/ddpm-cifar10-32", use_safetensors=True)
233
+ ```
234
+
235
+ 加载和保存模型变体时,需在[`ModelMixin.from_pretrained`]和[`ModelMixin.save_pretrained`]中指定`variant`参数:
236
+
237
+ ```python
238
+ from diffusers import UNet2DConditionModel
239
+
240
+ unet = UNet2DConditionModel.from_pretrained(
241
+ "stable-diffusion-v1-5/stable-diffusion-v1-5", subfolder="unet", variant="non_ema", use_safetensors=True
242
+ )
243
+ unet.save_pretrained("./local-unet", variant="non_ema")
244
+ ```
245
+
246
+ 使用[`~ModelMixin.from_pretrained`]的`torch_dtype`参数指定模型加载精度:
247
+
248
+ ```python
249
+ from diffusers import AutoModel
250
+
251
+ unet = AutoModel.from_pretrained(
252
+ "stabilityai/stable-diffusion-xl-base-1.0", subfolder="unet", torch_dtype=torch.float16
253
+ )
254
+ ```
255
+
256
+ 也可使用[torch.Tensor.to](https://docs.pytorch.org/docs/stable/generated/torch.Tensor.to.html)方法即时转换精度,但会转换所有权重(不同于`torch_dtype`参数会保留`_keep_in_fp32_modules`中的层)。这对某些必须保持fp32精度的层尤为重要(参见[示例](https://github.com/huggingface/diffusers/blob/f864a9a352fa4a220d860bfdd1782e3e5af96382/src/diffusers/models/transformers/transformer_wan.py#L374))。
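+
+ 例如(简单示意,沿用上文加载的 `unet`):
+
+ ```python
+ import torch
+
+ # 注意:.to() 会统一转换所有权重,包括 `_keep_in_fp32_modules` 中本应保持 fp32 的层
+ unet = unet.to(torch.float16)
+ ```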
exp_code/1_benchmark/diffusers-WanS2V/examples/README.md ADDED
@@ -0,0 +1,70 @@
1
+ <!---
2
+ Copyright 2025 The HuggingFace Team. All rights reserved.
3
+ Licensed under the Apache License, Version 2.0 (the "License");
4
+ you may not use this file except in compliance with the License.
5
+ You may obtain a copy of the License at
6
+
7
+ http://www.apache.org/licenses/LICENSE-2.0
8
+
9
+ Unless required by applicable law or agreed to in writing, software
10
+ distributed under the License is distributed on an "AS IS" BASIS,
11
+ WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ See the License for the specific language governing permissions and
13
+ limitations under the License.
14
+ -->
15
+
16
+ # 🧨 Diffusers Examples
17
+
18
+ Diffusers examples are a collection of scripts to demonstrate how to effectively use the `diffusers` library
19
+ for a variety of use cases involving training or fine-tuning.
20
+
21
+ **Note**: If you are looking for **official** examples on how to use `diffusers` for inference, please have a look at [src/diffusers/pipelines](https://github.com/huggingface/diffusers/tree/main/src/diffusers/pipelines).
22
+
23
+ Our examples aspire to be **self-contained**, **easy-to-tweak**, **beginner-friendly** and for **one-purpose-only**.
24
+ More specifically, this means:
25
+
26
+ - **Self-contained**: An example script shall only depend on "pip-install-able" Python packages that can be found in a `requirements.txt` file. Example scripts shall **not** depend on any local files. This means that one can simply download an example script, *e.g.* [train_unconditional.py](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/train_unconditional.py), install the required dependencies, *e.g.* [requirements.txt](https://github.com/huggingface/diffusers/blob/main/examples/unconditional_image_generation/requirements.txt) and execute the example script.
27
+ - **Easy-to-tweak**: While we strive to present as many use cases as possible, the example scripts are just that - examples. It is expected that they won't work out-of-the box on your specific problem and that you will be required to change a few lines of code to adapt them to your needs. To help you with that, most of the examples fully expose the preprocessing of the data and the training loop to allow you to tweak and edit them as required.
28
+ - **Beginner-friendly**: We do not aim for providing state-of-the-art training scripts for the newest models, but rather examples that can be used as a way to better understand diffusion models and how to use them with the `diffusers` library. We often purposefully leave out certain state-of-the-art methods if we consider them too complex for beginners.
29
+ - **One-purpose-only**: Examples should show one task and one task only. Even if a task is from a modeling point of view very similar, *e.g.* image super-resolution and image modification tend to use the same model and training method, we want examples to showcase only one task to keep them as readable and easy-to-understand as possible.
30
+
31
+ We provide **official** examples that cover the most popular tasks of diffusion models.
32
+ *Official* examples are **actively** maintained by the `diffusers` maintainers and we try to rigorously follow our example philosophy as defined above.
33
+ If you feel like another important example should exist, we are more than happy to welcome a [Feature Request](https://github.com/huggingface/diffusers/issues/new?assignees=&labels=&template=feature_request.md&title=) or directly a [Pull Request](https://github.com/huggingface/diffusers/compare) from you!
34
+
35
+ Training examples show how to pretrain or fine-tune diffusion models for a variety of tasks. Currently we support:
36
+
37
+ | Task | 🤗 Accelerate | 🤗 Datasets | Colab
38
+ |---|---|:---:|:---:|
39
+ | [**Unconditional Image Generation**](./unconditional_image_generation) | ✅ | ✅ | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/training_example.ipynb)
40
+ | [**Text-to-Image fine-tuning**](./text_to_image) | ✅ | ✅ |
41
+ | [**Textual Inversion**](./textual_inversion) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_textual_inversion_training.ipynb)
42
+ | [**Dreambooth**](./dreambooth) | ✅ | - | [![Open In Colab](https://colab.research.google.com/assets/colab-badge.svg)](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/sd_dreambooth_training.ipynb)
43
+ | [**ControlNet**](./controlnet) | ✅ | ✅ | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/controlnet.ipynb)
44
+ | [**InstructPix2Pix**](./instruct_pix2pix) | ✅ | ✅ | [Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/InstructPix2Pix_using_diffusers.ipynb)
45
+ | [**Reinforcement Learning for Control**](./reinforcement_learning) | - | - | [Notebook1](https://github.com/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_for_control.ipynb), [Notebook2](https://github.com/huggingface/notebooks/blob/main/diffusers/reinforcement_learning_with_diffusers.ipynb)
46
+
47
+ ## Community
48
+
49
+ In addition, we provide **community** examples, which are examples added and maintained by our community.
50
+ Community examples can consist of both *training* examples or *inference* pipelines.
51
+ For such examples, we are more lenient regarding the philosophy defined above and also cannot guarantee to provide maintenance for every issue.
52
+ Examples that are useful for the community, but are either not yet deemed popular or not yet following our above philosophy should go into the [community examples](https://github.com/huggingface/diffusers/tree/main/examples/community) folder. The community folder therefore includes training examples and inference pipelines.
53
+ **Note**: Community examples can be a [great first contribution](https://github.com/huggingface/diffusers/issues?q=is%3Aopen+is%3Aissue+label%3A%22good+first+issue%22) to show to the community how you like to use `diffusers` 🪄.
54
+
55
+ ## Research Projects
56
+
57
+ We also provide **research_projects** examples that are maintained by the community as defined in the respective research project folders. These examples are useful and offer the extended capabilities which are complementary to the official examples. You may refer to [research_projects](https://github.com/huggingface/diffusers/tree/main/examples/research_projects) for details.
58
+
59
+ ## Important note
60
+
61
+ To make sure you can successfully run the latest versions of the example scripts, you have to **install the library from source** and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
62
+ ```bash
63
+ git clone https://github.com/huggingface/diffusers
64
+ cd diffusers
65
+ pip install .
66
+ ```
67
+ Then cd into the example folder of your choice and run
68
+ ```bash
69
+ pip install -r requirements.txt
70
+ ```
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/README.md ADDED
@@ -0,0 +1,466 @@
1
+ # Advanced diffusion training examples
2
+
3
+ ## Train Dreambooth LoRA with Stable Diffusion XL
4
+ > [!TIP]
5
+ > 💡 This example follows the techniques and recommended practices covered in the blog post: [LoRA training scripts of the world, unite!](https://huggingface.co/blog/sdxl_lora_advanced_script). Make sure to check it out before starting 🤗
6
+
7
+ [DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text2image models like Stable Diffusion given just a few (3-5) images of a subject.
8
+
9
+ LoRA - Low-Rank Adaption of Large Language Models, was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*
10
+ In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:
11
+ - Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114)
12
+ - Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
13
+ - LoRA attention layers allow controlling the extent to which the model is adapted towards new training images via a `scale` parameter.
14
+ [cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in
15
+ the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.
16
+
17
+ The `train_dreambooth_lora_sdxl_advanced.py` script shows how to implement dreambooth-LoRA, combining the training process shown in `train_dreambooth_lora_sdxl.py`, with
18
+ advanced features and techniques, inspired and built upon contributions by [Nataniel Ruiz](https://twitter.com/natanielruizg): [Dreambooth](https://dreambooth.github.io), [Rinon Gal](https://twitter.com/RinonGal): [Textual Inversion](https://textual-inversion.github.io), [Ron Mokady](https://twitter.com/MokadyRon): [Pivotal Tuning](https://huggingface.co/papers/2106.05744), [Simo Ryu](https://twitter.com/cloneofsimo): [cog-sdxl](https://github.com/replicate/cog-sdxl),
19
+ [Kohya](https://twitter.com/kohya_tech/): [sd-scripts](https://github.com/kohya-ss/sd-scripts), [The Last Ben](https://twitter.com/__TheBen): [fast-stable-diffusion](https://github.com/TheLastBen/fast-stable-diffusion) ❤️
20
+
21
+ > [!NOTE]
22
+ > 💡If this is your first time training a Dreambooth LoRA, congrats!🥳
23
+ > You might want to familiarize yourself more with the techniques: [Dreambooth blog](https://huggingface.co/blog/dreambooth), [Using LoRA for Efficient Stable Diffusion Fine-Tuning blog](https://huggingface.co/blog/lora)
24
+
25
+ 📚 Read more about the advanced features and best practices in this community derived blog post: [LoRA training scripts of the world, unite!](https://huggingface.co/blog/sdxl_lora_advanced_script)
26
+
27
+
28
+ ## Running locally with PyTorch
29
+
30
+ ### Installing the dependencies
31
+
32
+ Before running the scripts, make sure to install the library's training dependencies:
33
+
34
+ **Important**
35
+
36
+ To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
37
+ ```bash
38
+ git clone https://github.com/huggingface/diffusers
39
+ cd diffusers
40
+ pip install -e .
41
+ ```
42
+
43
+ Then cd into the `examples/advanced_diffusion_training` folder and run
44
+ ```bash
45
+ pip install -r requirements.txt
46
+ ```
47
+
48
+ And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
49
+
50
+ ```bash
51
+ accelerate config
52
+ ```
53
+
54
+ Or for a default accelerate configuration without answering questions about your environment
55
+
56
+ ```bash
57
+ accelerate config default
58
+ ```
59
+
60
+ Or if your environment doesn't support an interactive shell e.g. a notebook
61
+
62
+ ```python
63
+ from accelerate.utils import write_basic_config
64
+ write_basic_config()
65
+ ```
66
+
67
+ When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups.
68
+ Note also that we use PEFT library as backend for LoRA training, make sure to have `peft>=0.6.0` installed in your environment.
69
+
70
+ Lastly, we recommend logging into your HF account so that your trained LoRA is automatically uploaded to the hub:
71
+ ```bash
72
+ hf auth login
73
+ ```
74
+ This command will prompt you for a token. Copy-paste yours from your [settings/tokens](https://huggingface.co/settings/tokens), and press Enter.
75
+
76
+ > [!NOTE]
77
+ > In the examples below we use `wandb` to document the training runs. To do the same, make sure to install `wandb`:
78
+ > `pip install wandb`
79
+ > Alternatively, you can use other tools / train without reporting by modifying the flag `--report_to="wandb"`.
80
+
81
+ ### Pivotal Tuning
82
+ **Training with text encoder(s)**
83
+
84
+ Alongside the UNet, LoRA fine-tuning of the text encoders is also supported. In addition to the text encoder optimization
85
+ available with `train_dreambooth_lora_sdxl_advanced.py`, in the advanced script **pivotal tuning** is also supported.
86
+ [pivotal tuning](https://huggingface.co/blog/sdxl_lora_advanced_script#pivotal-tuning) combines Textual Inversion with regular diffusion fine-tuning -
87
+ we insert new tokens into the text encoders of the model, instead of reusing existing ones.
88
+ We then optimize the newly-inserted token embeddings to represent the new concept.
89
+
90
+ To do so, just specify `--train_text_encoder_ti` while launching training (for regular text encoder optimizations, use `--train_text_encoder`).
91
+ Please keep the following points in mind:
92
+
93
+ * SDXL has two text encoders. So, we fine-tune both using LoRA.
94
+ * When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory.
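+
+ To make the pivotal tuning idea more concrete, here is a minimal, simplified sketch of the token-insertion step (illustrative only - the actual script handles both SDXL text encoders and restricts optimization to the newly added embedding rows):
+
+ ```python
+ from transformers import CLIPTextModel, CLIPTokenizer
+
+ tokenizer = CLIPTokenizer.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="tokenizer")
+ text_encoder = CLIPTextModel.from_pretrained("stabilityai/stable-diffusion-xl-base-1.0", subfolder="text_encoder")
+
+ # insert new placeholder tokens and make room for their embeddings
+ tokenizer.add_tokens(["<s0>", "<s1>"])
+ text_encoder.resize_token_embeddings(len(tokenizer))
+
+ # these newly added embedding rows are what pivotal tuning optimizes
+ new_token_ids = tokenizer.convert_tokens_to_ids(["<s0>", "<s1>"])
+ print(text_encoder.get_input_embeddings().weight[new_token_ids].shape)  # (2, hidden_dim)
+ ```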
95
+
96
+ ### 3D icon example
97
+
98
+ Now let's get our dataset. For this example we will use some cool images of 3d rendered icons: https://huggingface.co/datasets/linoyts/3d_icon.
99
+
100
+ Let's first download it locally:
101
+
102
+ ```python
103
+ from huggingface_hub import snapshot_download
104
+
105
+ local_dir = "./3d_icon"
106
+ snapshot_download(
107
+ "LinoyTsaban/3d_icon",
108
+ local_dir=local_dir, repo_type="dataset",
109
+ ignore_patterns=".gitattributes",
110
+ )
111
+ ```
112
+
113
+ Let's review some of the advanced features we're going to be using for this example:
114
+ - **custom captions**:
115
+ To use custom captioning, first ensure that you have the datasets library installed, otherwise you can install it by
116
+ ```bash
117
+ pip install datasets
118
+ ```
119
+
120
+ Now we'll simply specify the name of the dataset and caption column (in this case it's "prompt")
121
+
122
+ ```
123
+ --dataset_name=./3d_icon
124
+ --caption_column=prompt
125
+ ```
126
+
127
+ You can also load a dataset straight from the Hugging Face Hub by specifying its name in `dataset_name`.
128
+ Look [here](https://huggingface.co/blog/sdxl_lora_advanced_script#custom-captioning) for more info on creating/loading your own caption dataset.
129
+
130
+ - **optimizer**: for this example, we'll use [prodigy](https://huggingface.co/blog/sdxl_lora_advanced_script#adaptive-optimizers) - an adaptive optimizer
131
+ - To use Prodigy, please make sure to install the prodigyopt library: `pip install prodigyopt`
132
+ - **pivotal tuning**
133
+ - **min SNR gamma**
134
+
135
+ **Now, we can launch training:**
136
+
137
+ ```bash
138
+ export MODEL_NAME="stabilityai/stable-diffusion-xl-base-1.0"
139
+ export DATASET_NAME="./3d_icon"
140
+ export OUTPUT_DIR="3d-icon-SDXL-LoRA"
141
+ export VAE_PATH="madebyollin/sdxl-vae-fp16-fix"
142
+
143
+ accelerate launch train_dreambooth_lora_sdxl_advanced.py \
144
+ --pretrained_model_name_or_path=$MODEL_NAME \
145
+ --pretrained_vae_model_name_or_path=$VAE_PATH \
146
+ --dataset_name=$DATASET_NAME \
147
+ --instance_prompt="3d icon in the style of TOK" \
148
+ --validation_prompt="a TOK icon of an astronaut riding a horse, in the style of TOK" \
149
+ --output_dir=$OUTPUT_DIR \
150
+ --caption_column="prompt" \
151
+ --mixed_precision="bf16" \
152
+ --resolution=1024 \
153
+ --train_batch_size=3 \
154
+ --repeats=1 \
155
+ --report_to="wandb"\
156
+ --gradient_accumulation_steps=1 \
157
+ --gradient_checkpointing \
158
+ --learning_rate=1.0 \
159
+ --text_encoder_lr=1.0 \
160
+ --optimizer="prodigy"\
161
+ --train_text_encoder_ti\
162
+ --train_text_encoder_ti_frac=0.5\
163
+ --snr_gamma=5.0 \
164
+ --lr_scheduler="constant" \
165
+ --lr_warmup_steps=0 \
166
+ --rank=8 \
167
+ --max_train_steps=1000 \
168
+ --checkpointing_steps=2000 \
169
+ --seed="0" \
170
+ --push_to_hub
171
+ ```
172
+
173
+ To better track our training experiments, we're using the following flags in the command above:
174
+
175
+ * `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
176
+ * `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
177
+
178
+ Our experiments were conducted on a single 40GB A100 GPU.
179
+
180
+
181
+ ### Inference
182
+
183
+ Once training is done, we can perform inference like so:
184
+ 1. starting with loading the unet lora weights
185
+ ```python
186
+ import torch
187
+ from huggingface_hub import hf_hub_download, upload_file
188
+ from diffusers import DiffusionPipeline
189
+ from diffusers.models import AutoencoderKL
190
+ from safetensors.torch import load_file
191
+
192
+ username = "linoyts"
193
+ repo_id = f"{username}/3d-icon-SDXL-LoRA"
194
+
195
+ pipe = DiffusionPipeline.from_pretrained(
196
+ "stabilityai/stable-diffusion-xl-base-1.0",
197
+ torch_dtype=torch.float16,
198
+ variant="fp16",
199
+ ).to("cuda")
200
+
201
+
202
+ pipe.load_lora_weights(repo_id, weight_name="pytorch_lora_weights.safetensors")
203
+ ```
204
+ 2. now we load the pivotal tuning embeddings
205
+
206
+ ```python
207
+ text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
208
+ tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
209
+
210
+ embedding_path = hf_hub_download(repo_id=repo_id, filename="3d-icon-SDXL-LoRA_emb.safetensors", repo_type="model")
211
+
212
+ state_dict = load_file(embedding_path)
213
+ # load embeddings of text_encoder 1 (CLIP ViT-L/14)
214
+ pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
215
+ # load embeddings of text_encoder 2 (CLIP ViT-G/14)
216
+ pipe.load_textual_inversion(state_dict["clip_g"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
217
+ ```
218
+
219
+ 3. let's generate images
220
+
221
+ ```python
222
+ instance_token = "<s0><s1>"
223
+ prompt = f"a {instance_token} icon of an orange llama eating ramen, in the style of {instance_token}"
224
+
225
+ image = pipe(prompt=prompt, num_inference_steps=25, cross_attention_kwargs={"scale": 1.0}).images[0]
226
+ image.save("llama.png")
227
+ ```
228
+
229
+ ### Comfy UI / AUTOMATIC1111 Inference
230
+ The new script fully supports textual inversion loading with Comfy UI and AUTOMATIC1111 formats!
231
+
232
+ **AUTOMATIC1111 / SD.Next** \
233
+ In AUTOMATIC1111/SD.Next we will load a LoRA and a textual embedding at the same time.
234
+ - *LoRA*: Besides the diffusers format, the script will also train a WebUI compatible LoRA. It is generated as `{your_lora_name}.safetensors`. You can then include it in your `models/Lora` directory.
235
+ - *Embedding*: the embedding is the same for diffusers and WebUI. You can download your `{lora_name}_emb.safetensors` file from a trained model, and include it in your `embeddings` directory.
236
+
237
+ You can then run inference by prompting `a y2k_emb webpage about the movie Mean Girls <lora:y2k:0.9>`. You can use the `y2k_emb` token normally, including increasing its weight by doing `(y2k_emb:1.2)`.
238
+
239
+ **ComfyUI** \
240
+ In ComfyUI we will load a LoRA and a textual embedding at the same time.
241
+ - *LoRA*: Besides the diffusers format, the script will also train a ComfyUI compatible LoRA. It is generated as `{your_lora_name}.safetensors`. You can then include it in your `models/Lora` directory. Then you will load the LoRALoader node and hook that up with your model and CLIP. [Official guide for loading LoRAs](https://comfyanonymous.github.io/ComfyUI_examples/lora/)
242
+ - *Embedding*: the embedding is the same for diffusers and WebUI. You can download your `{lora_name}_emb.safetensors` file from a trained model, and include it in your `models/embeddings` directory and use it in your prompts like `embedding:y2k_emb`. [Official guide for loading embeddings](https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/).
243
+
244
+ ### Specifying a better VAE
245
+
246
+ SDXL's VAE is known to suffer from numerical instability issues. This is why we also expose a CLI argument namely `--pretrained_vae_model_name_or_path` that lets you specify the location of a better VAE (such as [this one](https://huggingface.co/madebyollin/sdxl-vae-fp16-fix)).
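+
+ For example, appended to the launch command (this is the same VAE the example configuration above points to via `VAE_PATH`):
+
+ ```bash
+ --pretrained_vae_model_name_or_path="madebyollin/sdxl-vae-fp16-fix"
+ ```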
247
+
248
+ ### DoRA training
249
+ The advanced script supports DoRA training too!
250
+ > Proposed in [DoRA: Weight-Decomposed Low-Rank Adaptation](https://huggingface.co/papers/2402.09353),
251
+ **DoRA** is very similar to LoRA, except it decomposes the pre-trained weight into two components, **magnitude** and **direction** and employs LoRA for _directional_ updates to efficiently minimize the number of trainable parameters.
252
+ The authors found that by using DoRA, both the learning capacity and training stability of LoRA are enhanced without any additional overhead during inference.
253
+
254
+ > [!NOTE]
255
+ > 💡DoRA training is still _experimental_
256
+ > and is likely to require different hyperparameter values to perform best compared to a LoRA.
257
+ > Specifically, we've noticed 2 differences to take into account your training:
258
+ > 1. **LoRA seem to converge faster than DoRA** (so a set of parameters that may lead to overfitting when training a LoRA may be working well for a DoRA)
259
+ > 2. **DoRA quality superior to LoRA especially in lower ranks** the difference in quality of DoRA of rank 8 and LoRA of rank 8 appears to be more significant than when training ranks of 32 or 64 for example.
260
+ > This is also aligned with some of the quantitative analysis shown in the paper.
261
+
262
+ **Usage**
263
+ 1. To use DoRA you need to install `peft` from main:
264
+ ```bash
265
+ pip install git+https://github.com/huggingface/peft.git
266
+ ```
267
+ 2. Enable DoRA training by adding this flag
268
+ ```bash
269
+ --use_dora
270
+ ```
271
+ **Inference**
272
+ The inference is the same as if you train a regular LoRA 🤗
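+
+ For example, a minimal sketch (the repository name below is a placeholder for wherever you pushed your DoRA weights):
+
+ ```python
+ import torch
+ from diffusers import DiffusionPipeline
+
+ pipe = DiffusionPipeline.from_pretrained(
+     "stabilityai/stable-diffusion-xl-base-1.0", torch_dtype=torch.float16, variant="fp16"
+ ).to("cuda")
+
+ # DoRA adapters are loaded exactly like LoRA adapters
+ pipe.load_lora_weights("your-username/your-dora-lora", weight_name="pytorch_lora_weights.safetensors")
+ image = pipe("3d icon in the style of TOK, an astronaut", num_inference_steps=25).images[0]
+ ```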
273
+
274
+ ## Conducting EDM-style training
275
+
276
+ It's now possible to perform EDM-style training as proposed in [Elucidating the Design Space of Diffusion-Based Generative Models](https://huggingface.co/papers/2206.00364).
277
+
278
+ simply set:
279
+
280
+ ```diff
281
+ + --do_edm_style_training \
282
+ ```
283
+
284
+ Other SDXL-like models that use the EDM formulation, such as [playgroundai/playground-v2.5-1024px-aesthetic](https://huggingface.co/playgroundai/playground-v2.5-1024px-aesthetic), can also be DreamBooth'd with the script. Below is an example command:
285
+
286
+ ```bash
287
+ accelerate launch train_dreambooth_lora_sdxl_advanced.py \
288
+ --pretrained_model_name_or_path="playgroundai/playground-v2.5-1024px-aesthetic" \
289
+ --dataset_name="linoyts/3d_icon" \
290
+ --instance_prompt="3d icon in the style of TOK" \
291
+ --validation_prompt="a TOK icon of an astronaut riding a horse, in the style of TOK" \
292
+ --output_dir="3d-icon-SDXL-LoRA" \
293
+ --do_edm_style_training \
294
+ --caption_column="prompt" \
295
+ --mixed_precision="bf16" \
296
+ --resolution=1024 \
297
+ --train_batch_size=3 \
298
+ --repeats=1 \
299
+ --report_to="wandb"\
300
+ --gradient_accumulation_steps=1 \
301
+ --gradient_checkpointing \
302
+ --learning_rate=1.0 \
303
+ --text_encoder_lr=1.0 \
304
+ --optimizer="prodigy"\
305
+ --train_text_encoder_ti\
306
+ --train_text_encoder_ti_frac=0.5\
307
+ --lr_scheduler="constant" \
308
+ --lr_warmup_steps=0 \
309
+ --rank=8 \
310
+ --max_train_steps=1000 \
311
+ --checkpointing_steps=2000 \
312
+ --seed="0" \
313
+ --push_to_hub
314
+ ```
315
+
316
+ > [!CAUTION]
317
+ > Min-SNR gamma is not supported with the EDM-style training yet. When training with the PlaygroundAI model, it's recommended to not pass any "variant".
318
+
319
+ ### B-LoRA training
320
+ The advanced script now supports B-LoRA training too!
321
+ > Proposed in [Implicit Style-Content Separation using B-LoRA](https://huggingface.co/papers/2403.14572),
322
+ B-LoRA is a method that leverages LoRA to implicitly separate the style and content components of a **single** image.
323
+ It was shown that learning the LoRA weights of two specific blocks (referred to as B-LoRAs)
324
+ achieves style-content separation that cannot be achieved by training each B-LoRA independently.
325
+ Once trained, the two B-LoRAs can be used as independent components to allow various image stylization tasks.
326
+
327
+ **Usage**
328
+ Enable B-LoRA training by adding this flag
329
+ ```bash
330
+ --use_blora
331
+ ```
332
+ You can train a B-LoRA with as little as 1 image, and 1000 steps. Try this default configuration as a start:
333
+ ```bash
334
+ accelerate launch train_dreambooth_b-lora_sdxl.py \
335
+ --pretrained_model_name_or_path="stabilityai/stable-diffusion-xl-base-1.0" \
336
+ --instance_data_dir="linoyts/B-LoRA_teddy_bear" \
337
+ --output_dir="B-LoRA_teddy_bear" \
338
+ --instance_prompt="a [v18]" \
339
+ --resolution=1024 \
340
+ --rank=64 \
341
+ --train_batch_size=1 \
342
+ --learning_rate=5e-5 \
343
+ --lr_scheduler="constant" \
344
+ --lr_warmup_steps=0 \
345
+ --max_train_steps=1000 \
346
+ --checkpointing_steps=2000 \
347
+ --seed="0" \
348
+ --gradient_checkpointing \
349
+ --mixed_precision="fp16"
350
+ ```
351
+ **Inference**
352
+ The inference is a bit different:
353
+ 1. we need to load *specific* unet layers (as opposed to a regular LoRA/DoRA)
354
+ 2. the trained layers we load change based on our objective (e.g. style/content)
355
+
356
+ ```python
357
+ import torch
358
+ from diffusers import StableDiffusionXLPipeline, AutoencoderKL
359
+
360
+ # taken & modified from B-LoRA repo - https://github.com/yardenfren1996/B-LoRA/blob/main/blora_utils.py
361
+ def is_belong_to_blocks(key, blocks):
362
+ try:
363
+ for g in blocks:
364
+ if g in key:
365
+ return True
366
+ return False
367
+ except Exception as e:
368
+ raise type(e)(f'failed to is_belong_to_block, due to: {e}')
369
+
370
+ def lora_lora_unet_blocks(lora_path, alpha, target_blocks):
371
+ state_dict, _ = pipeline.lora_state_dict(lora_path)
372
+ filtered_state_dict = {k: v * alpha for k, v in state_dict.items() if is_belong_to_blocks(k, target_blocks)}
373
+ return filtered_state_dict
374
+
375
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
376
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
377
+ "stabilityai/stable-diffusion-xl-base-1.0",
378
+ vae=vae,
379
+ torch_dtype=torch.float16,
380
+ ).to("cuda")
381
+
382
+ # pick a blora for content/style (you can also set one to None)
383
+ content_B_lora_path = "lora-library/B-LoRA-teddybear"
384
+ style_B_lora_path= "lora-library/B-LoRA-pen_sketch"
385
+
386
+
387
+ content_B_LoRA = lora_lora_unet_blocks(content_B_lora_path,alpha=1,target_blocks=["unet.up_blocks.0.attentions.0"])
388
+ style_B_LoRA = lora_lora_unet_blocks(style_B_lora_path,alpha=1.1,target_blocks=["unet.up_blocks.0.attentions.1"])
389
+ combined_lora = {**content_B_LoRA, **style_B_LoRA}
390
+
391
+ # Load both loras
392
+ pipeline.load_lora_into_unet(combined_lora, None, pipeline.unet)
393
+
394
+ #generate
395
+ prompt = "a [v18] in [v30] style"
396
+ pipeline(prompt, num_images_per_prompt=4).images
397
+ ```
398
+ ### LoRA training of Targeted U-net Blocks
399
+ The advanced script now supports custom choice of U-net blocks to train during Dreambooth LoRA tuning.
400
+ > [!NOTE]
401
+ > This feature is still experimental
402
+
403
+ > Recently, works like B-LoRA showed the potential advantages of learning the LoRA weights of specific U-net blocks, not only in speed & memory,
404
+ > but also in reducing the amount of needed data, improving style manipulation and overcoming overfitting issues.
405
+ > In light of this, we're introducing a new feature to the advanced script to allow for configurable U-net learned blocks.
406
+
407
+ **Usage**
408
+ Configure which U-net blocks are learned by LoRA by adding the `--lora_unet_blocks` flag, with a comma-separated string specifying the targeted blocks.
409
+ e.g:
410
+ ```bash
411
+ --lora_unet_blocks="unet.up_blocks.0.attentions.0,unet.up_blocks.0.attentions.1"
412
+ ```
413
+
414
+ > [!NOTE]
415
+ > if you specify both `--use_blora` and `--lora_unet_blocks`, the values given in `--lora_unet_blocks` will be ignored.
416
+ > When enabling --use_blora, targeted U-net blocks are automatically set to be "unet.up_blocks.0.attentions.0,unet.up_blocks.0.attentions.1" as discussed in the paper.
417
+ > If you wish to experiment with different blocks, specify `--lora_unet_blocks` only.
418
+
419
+ **Inference**
420
+ Inference is the same as for B-LoRAs, except the input targeted blocks should be modified based on your training configuration.
421
+ ```python
422
+ import torch
423
+ from diffusers import StableDiffusionXLPipeline, AutoencoderKL
424
+
425
+ # taken & modified from B-LoRA repo - https://github.com/yardenfren1996/B-LoRA/blob/main/blora_utils.py
426
+ def is_belong_to_blocks(key, blocks):
427
+ try:
428
+ for g in blocks:
429
+ if g in key:
430
+ return True
431
+ return False
432
+ except Exception as e:
433
+ raise type(e)(f'failed to is_belong_to_block, due to: {e}')
434
+
435
+ def lora_lora_unet_blocks(lora_path, alpha, target_blocks):
436
+ state_dict, _ = pipeline.lora_state_dict(lora_path)
437
+ filtered_state_dict = {k: v * alpha for k, v in state_dict.items() if is_belong_to_blocks(k, target_blocks)}
438
+ return filtered_state_dict
439
+
440
+ vae = AutoencoderKL.from_pretrained("madebyollin/sdxl-vae-fp16-fix", torch_dtype=torch.float16)
441
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
442
+ "stabilityai/stable-diffusion-xl-base-1.0",
443
+ vae=vae,
444
+ torch_dtype=torch.float16,
445
+ ).to("cuda")
446
+
447
+ lora_path = "lora-library/B-LoRA-pen_sketch"
448
+
449
+ state_dict = lora_lora_unet_blocks(lora_path, alpha=1, target_blocks=["unet.up_blocks.0.attentions.0"])
450
+
451
+ # Load trained lora layers into the unet
452
+ pipeline.load_lora_into_unet(state_dict, None, pipeline.unet)
453
+
454
+ #generate
455
+ prompt = "a dog in [v30] style"
456
+ pipeline(prompt, num_images_per_prompt=4).images
457
+ ```
458
+
459
+
460
+ ### Tips and Tricks
461
+ Check out [these recommended practices](https://huggingface.co/blog/sdxl_lora_advanced_script#additional-good-practices)
462
+
463
+ ## Running on Colab Notebook
464
+ Check out [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_Dreambooth_LoRA_advanced_example.ipynb)
465
+ to train using the advanced features (including pivotal tuning), and [this notebook](https://colab.research.google.com/github/huggingface/notebooks/blob/main/diffusers/SDXL_DreamBooth_LoRA_.ipynb) to train on a free Colab, using some of the advanced features (excluding pivotal tuning).
466
+
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/README_flux.md ADDED
@@ -0,0 +1,381 @@
1
+ # Advanced diffusion training examples
2
+
3
+ ## Train Dreambooth LoRA with Flux.1 Dev
4
+ > [!TIP]
5
+ > 💡 This example follows some of the techniques and recommended practices covered in the community derived guide we made for SDXL training: [LoRA training scripts of the world, unite!](https://huggingface.co/blog/sdxl_lora_advanced_script).
6
+ > As many of these are architecture agnostic & generally relevant to fine-tuning of diffusion models we suggest to take a look 🤗
7
+
8
+ [DreamBooth](https://huggingface.co/papers/2208.12242) is a method to personalize text-to-image models like Flux and Stable Diffusion given just a few (3-5) images of a subject.
9
+
10
+ LoRA - Low-Rank Adaption of Large Language Models, was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*
11
+ In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:
12
+ - Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114)
13
+ - Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
14
+ - LoRA attention layers allow controlling the extent to which the model is adapted towards new training images via a `scale` parameter.
15
+ [cloneofsimo](https://github.com/cloneofsimo) was the first to try out LoRA training for Stable Diffusion in
16
+ the popular [lora](https://github.com/cloneofsimo/lora) GitHub repository.
17
+
18
+ The `train_dreambooth_lora_flux_advanced.py` script shows how to implement dreambooth-LoRA, combining the training process shown in `train_dreambooth_lora_flux.py`, with
19
+ advanced features and techniques, inspired and built upon contributions by [Nataniel Ruiz](https://twitter.com/natanielruizg): [Dreambooth](https://dreambooth.github.io), [Rinon Gal](https://twitter.com/RinonGal): [Textual Inversion](https://textual-inversion.github.io), [Ron Mokady](https://twitter.com/MokadyRon): [Pivotal Tuning](https://huggingface.co/papers/2106.05744), [Simo Ryu](https://twitter.com/cloneofsimo): [cog-sdxl](https://github.com/replicate/cog-sdxl),
20
+ [ostris](https://x.com/ostrisai):[ai-toolkit](https://github.com/ostris/ai-toolkit), [bghira](https://github.com/bghira):[SimpleTuner](https://github.com/bghira/SimpleTuner), [Kohya](https://twitter.com/kohya_tech/): [sd-scripts](https://github.com/kohya-ss/sd-scripts), [The Last Ben](https://twitter.com/__TheBen): [fast-stable-diffusion](https://github.com/TheLastBen/fast-stable-diffusion) ❤️
21
+
22
+ > [!NOTE]
23
+ > 💡If this is your first time training a Dreambooth LoRA, congrats!🥳
24
+ > You might want to familiarize yourself more with the techniques: [Dreambooth blog](https://huggingface.co/blog/dreambooth), [Using LoRA for Efficient Stable Diffusion Fine-Tuning blog](https://huggingface.co/blog/lora)
25
+
26
+ ## Running locally with PyTorch
27
+
28
+ ### Installing the dependencies
29
+
30
+ Before running the scripts, make sure to install the library's training dependencies:
31
+
32
+ **Important**
33
+
34
+ To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
35
+ ```bash
36
+ git clone https://github.com/huggingface/diffusers
37
+ cd diffusers
38
+ pip install -e .
39
+ ```
40
+
41
+ Then cd into the `examples/advanced_diffusion_training` folder and run
42
+ ```bash
43
+ pip install -r requirements.txt
44
+ ```
45
+
46
+ And initialize an [🤗Accelerate](https://github.com/huggingface/accelerate/) environment with:
47
+
48
+ ```bash
49
+ accelerate config
50
+ ```
51
+
52
+ Or for a default accelerate configuration without answering questions about your environment
53
+
54
+ ```bash
55
+ accelerate config default
56
+ ```
57
+
58
+ Or if your environment doesn't support an interactive shell e.g. a notebook
59
+
60
+ ```python
61
+ from accelerate.utils import write_basic_config
62
+ write_basic_config()
63
+ ```
64
+
65
+ When running `accelerate config`, if we specify torch compile mode to True there can be dramatic speedups.
66
+ Note also that we use PEFT library as backend for LoRA training, make sure to have `peft>=0.6.0` installed in your environment.
67
+
68
+ Lastly, we recommend logging into your HF account so that your trained LoRA is automatically uploaded to the hub:
69
+ ```bash
70
+ hf auth login
71
+ ```
72
+ This command will prompt you for a token. Copy-paste yours from your [settings/tokens](https://huggingface.co/settings/tokens), and press Enter.
73
+
74
+ > [!NOTE]
75
+ > In the examples below we use `wandb` to document the training runs. To do the same, make sure to install `wandb`:
76
+ > `pip install wandb`
77
+ > Alternatively, you can use other tools / train without reporting by modifying the flag `--report_to="wandb"`.
78
+
79
+ ### LoRA Rank and Alpha
80
+ Two key LoRA hyperparameters are LoRA rank and LoRA alpha.
81
+ - `--rank`: Defines the dimension of the trainable LoRA matrices. A higher rank means more expressiveness and capacity to learn (and more parameters).
82
+ - `--lora_alpha`: A scaling factor for the LoRA's output. The LoRA update is scaled by lora_alpha / lora_rank.
83
+ - lora_alpha vs. rank:
85
+   This ratio dictates the LoRA's effective strength:
86
+   - `lora_alpha == rank`: scaling factor is 1. The LoRA is applied with its learned strength. (e.g., alpha=16, rank=16)
87
+   - `lora_alpha < rank`: scaling factor < 1. Reduces the LoRA's impact. Useful for subtle changes or to prevent overpowering the base model. (e.g., alpha=8, rank=16)
88
+   - `lora_alpha > rank`: scaling factor > 1. Amplifies the LoRA's impact. Allows a lower-rank LoRA to have a stronger effect. (e.g., alpha=32, rank=16)
88
+
89
+ > [!TIP]
90
+ > A common starting point is to set `lora_alpha` equal to `rank`.
91
+ > Some also set `lora_alpha` to be twice the `rank` (e.g., lora_alpha=32 for lora_rank=16)
92
+ > to give the LoRA updates more influence without increasing parameter count.
93
+ > If you find your LoRA is "overcooking" or learning too aggressively, consider setting `lora_alpha` to half of `rank`
94
+ > (e.g., lora_alpha=8 for rank=16). Experimentation is often key to finding the optimal balance for your use case.
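+
+ As a quick sanity check of the ratio described above (plain arithmetic, independent of the training script):
+
+ ```python
+ # effective LoRA scaling = lora_alpha / rank
+ for rank, lora_alpha in [(16, 16), (16, 8), (16, 32)]:
+     print(f"rank={rank}, lora_alpha={lora_alpha} -> scale={lora_alpha / rank}")
+ # rank=16, lora_alpha=16 -> scale=1.0 (applied at learned strength)
+ # rank=16, lora_alpha=8 -> scale=0.5 (dampened)
+ # rank=16, lora_alpha=32 -> scale=2.0 (amplified)
+ ```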
95
+
96
+
97
+ ### Target Modules
98
+ When LoRA was first adapted from language models to diffusion models, it was applied to the cross-attention layers in the Unet that relate the image representations with the prompts that describe them.
99
+ More recently, SOTA text-to-image diffusion models replaced the Unet with a diffusion Transformer (DiT). With this change, we may also want to explore
100
+ applying LoRA training to different types of layers and blocks. To allow more flexibility and control over the targeted modules, we added `--lora_layers`, in which you can specify the exact modules
101
+ for LoRA training as a comma-separated string. Here are some examples of target modules you can provide:
102
+ - for attention only layers: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0"`
103
+ - to train the same modules as in the fal trainer: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0,attn.add_k_proj,attn.add_q_proj,attn.add_v_proj,attn.to_add_out,ff.net.0.proj,ff.net.2,ff_context.net.0.proj,ff_context.net.2"`
104
+ - to train the same modules as in ostris ai-toolkit / replicate trainer: `--lora_layers="attn.to_k,attn.to_q,attn.to_v,attn.to_out.0,attn.add_k_proj,attn.add_q_proj,attn.add_v_proj,attn.to_add_out,ff.net.0.proj,ff.net.2,ff_context.net.0.proj,ff_context.net.2,norm1_context.linear, norm1.linear,norm.linear,proj_mlp,proj_out"`
105
+ > [!NOTE]
106
+ > `--lora_layers` can also be used to specify which **blocks** to apply LoRA training to. To do so, simply add a block prefix to each layer in the comma separated string:
107
+ > **single DiT blocks**: to target the ith single transformer block, add the prefix `single_transformer_blocks.i`, e.g. - `single_transformer_blocks.i.attn.to_k`
108
+ > **MMDiT blocks**: to target the ith MMDiT block, add the prefix `transformer_blocks.i`, e.g. - `transformer_blocks.i.attn.to_k`
109
+ > [!NOTE]
110
+ > keep in mind that while training more layers can improve quality and expressiveness, it also increases the size of the output LoRA weights.
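+
+ For instance, to restrict LoRA training to the attention projections of only the first MMDiT block and the first single DiT block (an illustrative combination built from the prefixes above, not a recommended default):
+
+ ```bash
+ --lora_layers="transformer_blocks.0.attn.to_k,transformer_blocks.0.attn.to_q,transformer_blocks.0.attn.to_v,transformer_blocks.0.attn.to_out.0,single_transformer_blocks.0.attn.to_k,single_transformer_blocks.0.attn.to_q,single_transformer_blocks.0.attn.to_v"
+ ```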
111
+
112
+ ### Pivotal Tuning (and more)
113
+ **Training with text encoder(s)**
114
+
115
+ Alongside the Transformer, LoRA fine-tuning of the text encoders is also supported. In addition to the text encoder optimization
116
+ available with `train_dreambooth_lora_flux_advanced.py`, in the advanced script **pivotal tuning** is also supported.
117
+ [pivotal tuning](https://huggingface.co/blog/sdxl_lora_advanced_script#pivotal-tuning) combines Textual Inversion with regular diffusion fine-tuning -
118
+ we insert new tokens into the text encoders of the model, instead of reusing existing ones.
119
+ We then optimize the newly-inserted token embeddings to represent the new concept.
120
+
121
+ To do so, just specify `--train_text_encoder_ti` while launching training (for regular text encoder optimizations, use `--train_text_encoder`).
122
+ Please keep the following points in mind:
123
+
124
+ * Flux uses two text encoders - [CLIP](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux#diffusers.FluxPipeline.text_encoder) & [T5](https://huggingface.co/docs/diffusers/main/en/api/pipelines/flux#diffusers.FluxPipeline.text_encoder_2) , by default `--train_text_encoder_ti` performs pivotal tuning for the **CLIP** encoder only.
125
+ To activate pivotal tuning for both encoders, add the flag `--enable_t5_ti`.
126
+ * When not fine-tuning the text encoders, we ALWAYS precompute the text embeddings to save memory.
127
+ * **pure textual inversion** - to support the full range from pivotal tuning to textual inversion we introduce `--train_transformer_frac` which controls the amount of epochs the transformer LoRA layers are trained. By default, `--train_transformer_frac==1`, to trigger a textual inversion run set `--train_transformer_frac==0`. Values between 0 and 1 are supported as well, and we welcome the community to experiment w/ different settings and share the results!
128
+ * **token initializer** - similar to the original textual inversion work, you can specify a concept of your choosing as the starting point for training. By default, when enabling `--train_text_encoder_ti`, the new inserted tokens are initialized randomly. You can specify a token in `--initializer_concept` such that the starting point for the trained embeddings will be the embeddings associated with your chosen `--initializer_concept`.
129
+
130
+ ## Training examples
131
+
132
+ Now let's get our dataset. For this example we will use some cool images of 3d rendered icons: https://huggingface.co/datasets/linoyts/3d_icon.
133
+
134
+ Let's first download it locally:
135
+
136
+ ```python
137
+ from huggingface_hub import snapshot_download
138
+
139
+ local_dir = "./3d_icon"
140
+ snapshot_download(
141
+ "LinoyTsaban/3d_icon",
142
+ local_dir=local_dir, repo_type="dataset",
143
+ ignore_patterns=".gitattributes",
144
+ )
145
+ ```
146
+
147
+ Let's review some of the advanced features we're going to be using for this example:
148
+ - **custom captions**:
149
+ To use custom captioning, first ensure that you have the datasets library installed, otherwise you can install it by
150
+ ```bash
151
+ pip install datasets
152
+ ```
153
+
154
+ Now we'll simply specify the name of the dataset and caption column (in this case it's "prompt")
155
+
156
+ ```
157
+ --dataset_name=./3d_icon
158
+ --caption_column=prompt
159
+ ```
160
+
161
+ You can also load a dataset straight from the Hugging Face Hub by specifying its name in `dataset_name`.
162
+ Look [here](https://huggingface.co/blog/sdxl_lora_advanced_script#custom-captioning) for more info on creating/loading your own caption dataset.
163
+
164
+ - **optimizer**: for this example, we'll use [prodigy](https://huggingface.co/blog/sdxl_lora_advanced_script#adaptive-optimizers) - an adaptive optimizer
165
+ - To use Prodigy, please make sure to install the prodigyopt library: `pip install prodigyopt`
166
+ - **pivotal tuning**
167
+
168
+ ### Example #1: Pivotal tuning
169
+ **Now, we can launch training:**
170
+
171
+ ```bash
172
+ export MODEL_NAME="black-forest-labs/FLUX.1-dev"
173
+ export DATASET_NAME="./3d_icon"
174
+ export OUTPUT_DIR="3d-icon-Flux-LoRA"
175
+
176
+ accelerate launch train_dreambooth_lora_flux_advanced.py \
177
+ --pretrained_model_name_or_path=$MODEL_NAME \
178
+ --dataset_name=$DATASET_NAME \
179
+ --instance_prompt="3d icon in the style of TOK" \
180
+ --output_dir=$OUTPUT_DIR \
181
+ --caption_column="prompt" \
182
+ --mixed_precision="bf16" \
183
+ --resolution=1024 \
184
+ --train_batch_size=1 \
185
+ --repeats=1 \
186
+ --report_to="wandb"\
187
+ --gradient_accumulation_steps=1 \
188
+ --gradient_checkpointing \
189
+ --learning_rate=1.0 \
190
+ --text_encoder_lr=1.0 \
191
+ --optimizer="prodigy"\
192
+ --train_text_encoder_ti\
193
+ --train_text_encoder_ti_frac=0.5\
194
+ --lr_scheduler="constant" \
195
+ --lr_warmup_steps=0 \
196
+ --rank=8 \
197
+ --max_train_steps=700 \
198
+ --checkpointing_steps=2000 \
199
+ --seed="0" \
200
+ --push_to_hub
201
+ ```
202
+
203
+ To better track our training experiments, we're using the following flags in the command above:
204
+
205
+ * `report_to="wandb"` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
206
+ * `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
207
+
208
+ Our experiments were conducted on a single 40GB A100 GPU.
209
+
210
+ ### Example #2: Pivotal tuning with T5
211
+ Now let's try that with T5 as well, so instead of only optimizing the CLIP embeddings associated with newly inserted tokens, we'll optimize
212
+ the T5 embeddings as well. We can do this by simply adding `--enable_t5_ti` to the previous configuration:
213
+ ```bash
214
+ export MODEL_NAME="black-forest-labs/FLUX.1-dev"
215
+ export DATASET_NAME="./3d_icon"
216
+ export OUTPUT_DIR="3d-icon-Flux-LoRA"
217
+
218
+ accelerate launch train_dreambooth_lora_flux_advanced.py \
219
+ --pretrained_model_name_or_path=$MODEL_NAME \
220
+ --dataset_name=$DATASET_NAME \
221
+ --instance_prompt="3d icon in the style of TOK" \
222
+ --output_dir=$OUTPUT_DIR \
223
+ --caption_column="prompt" \
224
+ --mixed_precision="bf16" \
225
+ --resolution=1024 \
226
+ --train_batch_size=1 \
227
+ --repeats=1 \
228
+ --report_to="wandb"\
229
+ --gradient_accumulation_steps=1 \
230
+ --gradient_checkpointing \
231
+ --learning_rate=1.0 \
232
+ --text_encoder_lr=1.0 \
233
+ --optimizer="prodigy"\
234
+ --train_text_encoder_ti\
235
+ --enable_t5_ti\
236
+ --train_text_encoder_ti_frac=0.5\
237
+ --lr_scheduler="constant" \
238
+ --lr_warmup_steps=0 \
239
+ --rank=8 \
240
+ --max_train_steps=700 \
241
+ --checkpointing_steps=2000 \
242
+ --seed="0" \
243
+ --push_to_hub
244
+ ```
245
+
246
+ ### Example #3: Textual Inversion
247
+ To explore a pure textual inversion - i.e. only optimizing the text embeddings w/o training transformer LoRA layers, we
248
+ can set the value for `--train_transformer_frac` - which is responsible for the percent of epochs in which the transformer is
249
+ trained. By setting `--train_transformer_frac == 0` and enabling `--train_text_encoder_ti` we trigger a textual inversion train
250
+ run.
251
+ ```bash
252
+ export MODEL_NAME="black-forest-labs/FLUX.1-dev"
253
+ export DATASET_NAME="./3d_icon"
254
+ export OUTPUT_DIR="3d-icon-Flux-LoRA"
255
+
256
+ accelerate launch train_dreambooth_lora_flux_advanced.py \
257
+ --pretrained_model_name_or_path=$MODEL_NAME \
258
+ --dataset_name=$DATASET_NAME \
259
+ --instance_prompt="3d icon in the style of TOK" \
260
+ --output_dir=$OUTPUT_DIR \
261
+ --caption_column="prompt" \
262
+ --mixed_precision="bf16" \
263
+ --resolution=1024 \
264
+ --train_batch_size=1 \
265
+ --repeats=1 \
266
+ --report_to="wandb"\
267
+ --gradient_accumulation_steps=1 \
268
+ --gradient_checkpointing \
269
+ --learning_rate=1.0 \
270
+ --text_encoder_lr=1.0 \
271
+ --optimizer="prodigy"\
272
+ --train_text_encoder_ti\
273
+ --enable_t5_ti\
274
+ --train_text_encoder_ti_frac=0.5\
275
+ --train_transformer_frac=0\
276
+ --lr_scheduler="constant" \
277
+ --lr_warmup_steps=0 \
278
+ --rank=8 \
279
+ --max_train_steps=700 \
280
+ --checkpointing_steps=2000 \
281
+ --seed="0" \
282
+ --push_to_hub
283
+ ```
284
+ ### Inference - pivotal tuning
285
+
286
+ Once training is done, we can perform inference like so:
287
+ 1. starting with loading the transformer lora weights
288
+ ```python
289
+ import torch
290
+ from huggingface_hub import hf_hub_download, upload_file
291
+ from diffusers import AutoPipelineForText2Image
292
+ from safetensors.torch import load_file
293
+
294
+ username = "linoyts"
295
+ repo_id = f"{username}/3d-icon-Flux-LoRA"
296
+
297
+ pipe = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to('cuda')
298
+
299
+
300
+ pipe.load_lora_weights(repo_id, weight_name="pytorch_lora_weights.safetensors")
301
+ ```
302
+ 2. now we load the pivotal tuning embeddings
303
+ > [!NOTE] #1 if `--enable_t5_ti` wasn't passed, we only load the embeddings to the CLIP encoder.
304
+
305
+ > [!NOTE] #2 the number of tokens (i.e. <s0>,...,<si>) is either determined by `--num_new_tokens_per_abstraction` or by `--initializer_concept`. Make sure to update inference code accordingly :)
306
+ ```python
307
+ text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
308
+ tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
309
+
310
+ embedding_path = hf_hub_download(repo_id=repo_id, filename="3d-icon-Flux-LoRA_emb.safetensors", repo_type="model")
311
+
312
+ state_dict = load_file(embedding_path)
313
+ # load embeddings of text_encoder 1 (CLIP ViT-L/14)
314
+ pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
315
+ # load embeddings of text_encoder 2 (T5 XXL) - ignore this line if you didn't enable `--enable_t5_ti`
316
+ pipe.load_textual_inversion(state_dict["t5"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
317
+ ```
318
+
319
+ 3. let's generate images
320
+
321
+ ```python
322
+ instance_token = "<s0><s1>"
323
+ prompt = f"a {instance_token} icon of an orange llama eating ramen, in the style of {instance_token}"
324
+
325
+ image = pipe(prompt=prompt, num_inference_steps=25, joint_attention_kwargs={"scale": 1.0}).images[0]
326
+ image.save("llama.png")
327
+ ```
328
+
329
+ ### Inference - pure textual inversion
330
+ In this case, we don't load transformer layers as before, since we only optimize the text embeddings. The output of a textual inversion train run is a
331
+ `.safetensors` file containing the trained embeddings for the new tokens either for the CLIP encoder, or for both encoders (CLIP and T5)
332
+
333
+ 1. starting with loading the embeddings.
334
+ 💡note that here too, if you didn't enable `--enable_t5_ti`, you only load the embeddings to the CLIP encoder
335
+
336
+ ```python
337
+ import torch
338
+ from huggingface_hub import hf_hub_download, upload_file
339
+ from diffusers import AutoPipelineForText2Image
340
+ from safetensors.torch import load_file
341
+
342
+ username = "linoyts"
343
+ repo_id = f"{username}/3d-icon-Flux-LoRA"
344
+
345
+ pipe = AutoPipelineForText2Image.from_pretrained("black-forest-labs/FLUX.1-dev", torch_dtype=torch.bfloat16).to('cuda')
346
+
347
+ text_encoders = [pipe.text_encoder, pipe.text_encoder_2]
348
+ tokenizers = [pipe.tokenizer, pipe.tokenizer_2]
349
+
350
+ embedding_path = hf_hub_download(repo_id=repo_id, filename="3d-icon-Flux-LoRA_emb.safetensors", repo_type="model")
351
+
352
+ state_dict = load_file(embedding_path)
353
+ # load embeddings of text_encoder 1 (CLIP ViT-L/14)
354
+ pipe.load_textual_inversion(state_dict["clip_l"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder, tokenizer=pipe.tokenizer)
355
+ # load embeddings of text_encoder 2 (T5 XXL) - ignore this line if you didn't enable `--enable_t5_ti`
356
+ pipe.load_textual_inversion(state_dict["t5"], token=["<s0>", "<s1>"], text_encoder=pipe.text_encoder_2, tokenizer=pipe.tokenizer_2)
357
+ ```
358
+ 2. let's generate images
359
+
360
+ ```python
361
+ instance_token = "<s0><s1>"
362
+ prompt = f"a {instance_token} icon of an orange llama eating ramen, in the style of {instance_token}"
363
+
364
+ image = pipe(prompt=prompt, num_inference_steps=25, cross_attention_kwargs={"scale": 1.0}).images[0]
365
+ image.save("llama.png")
366
+ ```
367
+
368
+ ### Comfy UI / AUTOMATIC1111 Inference
369
+ The new script fully supports textual inversion loading with Comfy UI and AUTOMATIC1111 formats!
370
+
371
+ **AUTOMATIC1111 / SD.Next** \
372
+ In AUTOMATIC1111/SD.Next we will load a LoRA and a textual embedding at the same time.
373
+ - *LoRA*: Besides the diffusers format, the script will also train a WebUI compatible LoRA. It is generated as `{your_lora_name}.safetensors`. You can then include it in your `models/Lora` directory.
374
+ - *Embedding*: the embedding is the same for diffusers and WebUI. You can download your `{lora_name}_emb.safetensors` file from a trained model, and include it in your `embeddings` directory.
375
+
376
+ You can then run inference by prompting `a y2k_emb webpage about the movie Mean Girls <lora:y2k:0.9>`. You can use the `y2k_emb` token normally, including increasing its weight by doing `(y2k_emb:1.2)`.
377
+
378
+ **ComfyUI** \
379
+ In ComfyUI we will load a LoRA and a textual embedding at the same time.
380
+ - *LoRA*: Besides the diffusers format, the script will also train a ComfyUI compatible LoRA. It is generated as `{your_lora_name}.safetensors`. You can then include it in your `models/Lora` directory. Then you will load the LoRALoader node and hook that up with your model and CLIP. [Official guide for loading LoRAs](https://comfyanonymous.github.io/ComfyUI_examples/lora/)
381
+ - *Embedding*: the embedding is the same for diffusers and WebUI. You can download your `{lora_name}_emb.safetensors` file from a trained model, and include it in your `models/embeddings` directory and use it in your prompts like `embedding:y2k_emb`. [Official guide for loading embeddings](https://comfyanonymous.github.io/ComfyUI_examples/textual_inversion_embeddings/).
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/requirements.txt ADDED
@@ -0,0 +1,8 @@
1
+ accelerate>=0.31.0
2
+ torchvision
3
+ transformers>=4.41.2
4
+ ftfy
5
+ tensorboard
6
+ Jinja2
7
+ peft>=0.11.1
8
+ sentencepiece
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/requirements_flux.txt ADDED
@@ -0,0 +1,8 @@
1
+ accelerate>=0.31.0
2
+ torchvision
3
+ transformers>=4.41.2
4
+ ftfy
5
+ tensorboard
6
+ Jinja2
7
+ peft>=0.11.1
8
+ sentencepiece
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/test_dreambooth_lora_flux_advanced.py ADDED
@@ -0,0 +1,328 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 HuggingFace Inc.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import json
17
+ import logging
18
+ import os
19
+ import sys
20
+ import tempfile
21
+
22
+ import safetensors
23
+
24
+ from diffusers.loaders.lora_base import LORA_ADAPTER_METADATA_KEY
25
+
26
+
27
+ sys.path.append("..")
28
+ from test_examples_utils import ExamplesTestsAccelerate, run_command # noqa: E402
29
+
30
+
31
+ logging.basicConfig(level=logging.DEBUG)
32
+
33
+ logger = logging.getLogger()
34
+ stream_handler = logging.StreamHandler(sys.stdout)
35
+ logger.addHandler(stream_handler)
36
+
37
+
38
+ class DreamBoothLoRAFluxAdvanced(ExamplesTestsAccelerate):
39
+ instance_data_dir = "docs/source/en/imgs"
40
+ instance_prompt = "photo"
41
+ pretrained_model_name_or_path = "hf-internal-testing/tiny-flux-pipe"
42
+ script_path = "examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py"
43
+
44
+ def test_dreambooth_lora_flux(self):
45
+ with tempfile.TemporaryDirectory() as tmpdir:
46
+ test_args = f"""
47
+ {self.script_path}
48
+ --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
49
+ --instance_data_dir {self.instance_data_dir}
50
+ --instance_prompt {self.instance_prompt}
51
+ --resolution 64
52
+ --train_batch_size 1
53
+ --gradient_accumulation_steps 1
54
+ --max_train_steps 2
55
+ --learning_rate 5.0e-04
56
+ --scale_lr
57
+ --lr_scheduler constant
58
+ --lr_warmup_steps 0
59
+ --output_dir {tmpdir}
60
+ """.split()
61
+
62
+ run_command(self._launch_args + test_args)
63
+ # save_pretrained smoke test
64
+ self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
65
+
66
+ # make sure the state_dict has the correct naming in the parameters.
67
+ lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
68
+ is_lora = all("lora" in k for k in lora_state_dict.keys())
69
+ self.assertTrue(is_lora)
70
+
71
+ # when not training the text encoder, all the parameters in the state dict should start
72
+ # with `"transformer"` in their names.
73
+ starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
74
+ self.assertTrue(starts_with_transformer)
75
+
76
+ def test_dreambooth_lora_text_encoder_flux(self):
77
+ with tempfile.TemporaryDirectory() as tmpdir:
78
+ test_args = f"""
79
+ {self.script_path}
80
+ --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
81
+ --instance_data_dir {self.instance_data_dir}
82
+ --instance_prompt {self.instance_prompt}
83
+ --resolution 64
84
+ --train_batch_size 1
85
+ --train_text_encoder
86
+ --gradient_accumulation_steps 1
87
+ --max_train_steps 2
88
+ --learning_rate 5.0e-04
89
+ --scale_lr
90
+ --lr_scheduler constant
91
+ --lr_warmup_steps 0
92
+ --output_dir {tmpdir}
93
+ """.split()
94
+
95
+ run_command(self._launch_args + test_args)
96
+ # save_pretrained smoke test
97
+ self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
98
+
99
+ # make sure the state_dict has the correct naming in the parameters.
100
+ lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
101
+ is_lora = all("lora" in k for k in lora_state_dict.keys())
102
+ self.assertTrue(is_lora)
103
+
104
+ starts_with_expected_prefix = all(
105
+ (key.startswith("transformer") or key.startswith("text_encoder")) for key in lora_state_dict.keys()
106
+ )
107
+ self.assertTrue(starts_with_expected_prefix)
108
+
109
+ def test_dreambooth_lora_pivotal_tuning_flux_clip(self):
110
+ with tempfile.TemporaryDirectory() as tmpdir:
111
+ test_args = f"""
112
+ {self.script_path}
113
+ --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
114
+ --instance_data_dir {self.instance_data_dir}
115
+ --instance_prompt {self.instance_prompt}
116
+ --resolution 64
117
+ --train_batch_size 1
118
+ --train_text_encoder_ti
119
+ --gradient_accumulation_steps 1
120
+ --max_train_steps 2
121
+ --learning_rate 5.0e-04
122
+ --scale_lr
123
+ --lr_scheduler constant
124
+ --lr_warmup_steps 0
125
+ --output_dir {tmpdir}
126
+ """.split()
127
+
128
+ run_command(self._launch_args + test_args)
129
+ # save_pretrained smoke test
130
+ self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
131
+ # make sure embeddings were also saved
132
+ self.assertTrue(os.path.isfile(os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors")))
133
+
134
+ # make sure the state_dict has the correct naming in the parameters.
135
+ lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
136
+ is_lora = all("lora" in k for k in lora_state_dict.keys())
137
+ self.assertTrue(is_lora)
138
+
139
+ # make sure the state_dict has the correct naming in the parameters.
140
+ textual_inversion_state_dict = safetensors.torch.load_file(
141
+ os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors")
142
+ )
143
+ is_clip = all("clip_l" in k for k in textual_inversion_state_dict.keys())
144
+ self.assertTrue(is_clip)
145
+
146
+ # when performing pivotal tuning, all the parameters in the state dict should start
147
+ # with `"transformer"` in their names.
148
+ starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
149
+ self.assertTrue(starts_with_transformer)
150
+
151
+ def test_dreambooth_lora_pivotal_tuning_flux_clip_t5(self):
152
+ with tempfile.TemporaryDirectory() as tmpdir:
153
+ test_args = f"""
154
+ {self.script_path}
155
+ --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
156
+ --instance_data_dir {self.instance_data_dir}
157
+ --instance_prompt {self.instance_prompt}
158
+ --resolution 64
159
+ --train_batch_size 1
160
+ --train_text_encoder_ti
161
+ --enable_t5_ti
162
+ --gradient_accumulation_steps 1
163
+ --max_train_steps 2
164
+ --learning_rate 5.0e-04
165
+ --scale_lr
166
+ --lr_scheduler constant
167
+ --lr_warmup_steps 0
168
+ --output_dir {tmpdir}
169
+ """.split()
170
+
171
+ run_command(self._launch_args + test_args)
172
+ # save_pretrained smoke test
173
+ self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
174
+ # make sure embeddings were also saved
175
+ self.assertTrue(os.path.isfile(os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors")))
176
+
177
+ # make sure the state_dict has the correct naming in the parameters.
178
+ lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
179
+ is_lora = all("lora" in k for k in lora_state_dict.keys())
180
+ self.assertTrue(is_lora)
181
+
182
+ # make sure the state_dict has the correct naming in the parameters.
183
+ textual_inversion_state_dict = safetensors.torch.load_file(
184
+ os.path.join(tmpdir, f"{os.path.basename(tmpdir)}_emb.safetensors")
185
+ )
186
+ is_te = all(("clip_l" in k or "t5" in k) for k in textual_inversion_state_dict.keys())
187
+ self.assertTrue(is_te)
188
+
189
+ # when performing pivotal tuning, all the parameters in the state dict should start
190
+ # with `"transformer"` in their names.
191
+ starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
192
+ self.assertTrue(starts_with_transformer)
193
+
194
+ def test_dreambooth_lora_latent_caching(self):
195
+ with tempfile.TemporaryDirectory() as tmpdir:
196
+ test_args = f"""
197
+ {self.script_path}
198
+ --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
199
+ --instance_data_dir {self.instance_data_dir}
200
+ --instance_prompt {self.instance_prompt}
201
+ --resolution 64
202
+ --train_batch_size 1
203
+ --gradient_accumulation_steps 1
204
+ --max_train_steps 2
205
+ --cache_latents
206
+ --learning_rate 5.0e-04
207
+ --scale_lr
208
+ --lr_scheduler constant
209
+ --lr_warmup_steps 0
210
+ --output_dir {tmpdir}
211
+ """.split()
212
+
213
+ run_command(self._launch_args + test_args)
214
+ # save_pretrained smoke test
215
+ self.assertTrue(os.path.isfile(os.path.join(tmpdir, "pytorch_lora_weights.safetensors")))
216
+
217
+ # make sure the state_dict has the correct naming in the parameters.
218
+ lora_state_dict = safetensors.torch.load_file(os.path.join(tmpdir, "pytorch_lora_weights.safetensors"))
219
+ is_lora = all("lora" in k for k in lora_state_dict.keys())
220
+ self.assertTrue(is_lora)
221
+
222
+ # when not training the text encoder, all the parameters in the state dict should start
223
+ # with `"transformer"` in their names.
224
+ starts_with_transformer = all(key.startswith("transformer") for key in lora_state_dict.keys())
225
+ self.assertTrue(starts_with_transformer)
226
+
227
+ def test_dreambooth_lora_flux_checkpointing_checkpoints_total_limit(self):
228
+ with tempfile.TemporaryDirectory() as tmpdir:
229
+ test_args = f"""
230
+ {self.script_path}
231
+ --pretrained_model_name_or_path={self.pretrained_model_name_or_path}
232
+ --instance_data_dir={self.instance_data_dir}
233
+ --output_dir={tmpdir}
234
+ --instance_prompt={self.instance_prompt}
235
+ --resolution=64
236
+ --train_batch_size=1
237
+ --gradient_accumulation_steps=1
238
+ --max_train_steps=6
239
+ --checkpoints_total_limit=2
240
+ --checkpointing_steps=2
241
+ """.split()
242
+
243
+ run_command(self._launch_args + test_args)
244
+
245
+ self.assertEqual(
246
+ {x for x in os.listdir(tmpdir) if "checkpoint" in x},
247
+ {"checkpoint-4", "checkpoint-6"},
248
+ )
249
+
250
+ def test_dreambooth_lora_flux_checkpointing_checkpoints_total_limit_removes_multiple_checkpoints(self):
251
+ with tempfile.TemporaryDirectory() as tmpdir:
252
+ test_args = f"""
253
+ {self.script_path}
254
+ --pretrained_model_name_or_path={self.pretrained_model_name_or_path}
255
+ --instance_data_dir={self.instance_data_dir}
256
+ --output_dir={tmpdir}
257
+ --instance_prompt={self.instance_prompt}
258
+ --resolution=64
259
+ --train_batch_size=1
260
+ --gradient_accumulation_steps=1
261
+ --max_train_steps=4
262
+ --checkpointing_steps=2
263
+ """.split()
264
+
265
+ run_command(self._launch_args + test_args)
266
+
267
+ self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-2", "checkpoint-4"})
268
+
269
+ resume_run_args = f"""
270
+ {self.script_path}
271
+ --pretrained_model_name_or_path={self.pretrained_model_name_or_path}
272
+ --instance_data_dir={self.instance_data_dir}
273
+ --output_dir={tmpdir}
274
+ --instance_prompt={self.instance_prompt}
275
+ --resolution=64
276
+ --train_batch_size=1
277
+ --gradient_accumulation_steps=1
278
+ --max_train_steps=8
279
+ --checkpointing_steps=2
280
+ --resume_from_checkpoint=checkpoint-4
281
+ --checkpoints_total_limit=2
282
+ """.split()
283
+
284
+ run_command(self._launch_args + resume_run_args)
285
+
286
+ self.assertEqual({x for x in os.listdir(tmpdir) if "checkpoint" in x}, {"checkpoint-6", "checkpoint-8"})
287
+
288
+ def test_dreambooth_lora_with_metadata(self):
289
+ # Use a `lora_alpha` that is different from `rank`.
290
+ lora_alpha = 8
291
+ rank = 4
292
+ with tempfile.TemporaryDirectory() as tmpdir:
293
+ test_args = f"""
294
+ {self.script_path}
295
+ --pretrained_model_name_or_path {self.pretrained_model_name_or_path}
296
+ --instance_data_dir {self.instance_data_dir}
297
+ --instance_prompt {self.instance_prompt}
298
+ --resolution 64
299
+ --train_batch_size 1
300
+ --gradient_accumulation_steps 1
301
+ --max_train_steps 2
302
+ --lora_alpha={lora_alpha}
303
+ --rank={rank}
304
+ --learning_rate 5.0e-04
305
+ --scale_lr
306
+ --lr_scheduler constant
307
+ --lr_warmup_steps 0
308
+ --output_dir {tmpdir}
309
+ """.split()
310
+
311
+ run_command(self._launch_args + test_args)
312
+ # save_pretrained smoke test
313
+ state_dict_file = os.path.join(tmpdir, "pytorch_lora_weights.safetensors")
314
+ self.assertTrue(os.path.isfile(state_dict_file))
315
+
316
+ # Check if the metadata was properly serialized.
317
+ with safetensors.torch.safe_open(state_dict_file, framework="pt", device="cpu") as f:
318
+ metadata = f.metadata() or {}
319
+
320
+ metadata.pop("format", None)
321
+ raw = metadata.get(LORA_ADAPTER_METADATA_KEY)
322
+ if raw:
323
+ raw = json.loads(raw)
324
+
325
+ loaded_lora_alpha = raw["transformer.lora_alpha"]
326
+ self.assertTrue(loaded_lora_alpha == lora_alpha)
327
+ loaded_lora_rank = raw["transformer.r"]
328
+ self.assertTrue(loaded_lora_rank == rank)
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/train_dreambooth_lora_flux_advanced.py ADDED
The diff for this file is too large to render. See raw diff
 
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py ADDED
@@ -0,0 +1,2081 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ # /// script
18
+ # dependencies = [
19
+ # "diffusers @ git+https://github.com/huggingface/diffusers.git",
20
+ # "torch>=2.0.0",
21
+ # "accelerate>=0.31.0",
22
+ # "transformers>=4.41.2",
23
+ # "ftfy",
24
+ # "tensorboard",
25
+ # "Jinja2",
26
+ # "peft>=0.11.1",
27
+ # "sentencepiece",
28
+ # ]
29
+ # ///
30
+
31
+ import argparse
32
+ import gc
33
+ import hashlib
34
+ import itertools
35
+ import logging
36
+ import math
37
+ import os
38
+ import re
39
+ import shutil
40
+ import warnings
41
+ from contextlib import nullcontext
42
+ from pathlib import Path
43
+ from typing import List, Optional
44
+
45
+ import numpy as np
46
+ import torch
47
+ import torch.nn.functional as F
48
+
49
+ # imports of the TokenEmbeddingsHandler class
50
+ import torch.utils.checkpoint
51
+ import transformers
52
+ from accelerate import Accelerator
53
+ from accelerate.logging import get_logger
54
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
55
+ from huggingface_hub import create_repo, upload_folder
56
+ from packaging import version
57
+ from peft import LoraConfig, set_peft_model_state_dict
58
+ from peft.utils import get_peft_model_state_dict
59
+ from PIL import Image
60
+ from PIL.ImageOps import exif_transpose
61
+ from safetensors.torch import load_file, save_file
62
+ from torch.utils.data import Dataset
63
+ from torchvision import transforms
64
+ from tqdm.auto import tqdm
65
+ from transformers import AutoTokenizer, PretrainedConfig
66
+
67
+ import diffusers
68
+ from diffusers import (
69
+ AutoencoderKL,
70
+ DDPMScheduler,
71
+ DPMSolverMultistepScheduler,
72
+ StableDiffusionPipeline,
73
+ UNet2DConditionModel,
74
+ )
75
+ from diffusers.loaders import StableDiffusionLoraLoaderMixin
76
+ from diffusers.optimization import get_scheduler
77
+ from diffusers.training_utils import _set_state_dict_into_text_encoder, cast_training_params, compute_snr
78
+ from diffusers.utils import (
79
+ check_min_version,
80
+ convert_all_state_dict_to_peft,
81
+ convert_state_dict_to_diffusers,
82
+ convert_state_dict_to_kohya,
83
+ convert_unet_state_dict_to_peft,
84
+ is_wandb_available,
85
+ )
86
+ from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
87
+ from diffusers.utils.import_utils import is_xformers_available
88
+
89
+
90
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
91
+ check_min_version("0.36.0.dev0")
92
+
93
+ logger = get_logger(__name__)
94
+
95
+
96
+ def save_model_card(
97
+ repo_id: str,
98
+ use_dora: bool,
99
+ images: list = None,
100
+ base_model: str = None,
101
+ train_text_encoder=False,
102
+ train_text_encoder_ti=False,
103
+ token_abstraction_dict=None,
104
+ instance_prompt=None,
105
+ validation_prompt=None,
106
+ repo_folder=None,
107
+ vae_path=None,
108
+ ):
109
+ lora = "lora" if not use_dora else "dora"
110
+
111
+ widget_dict = []
112
+ if images is not None:
113
+ for i, image in enumerate(images):
114
+ image.save(os.path.join(repo_folder, f"image_{i}.png"))
115
+ widget_dict.append(
116
+ {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"image_{i}.png"}}
117
+ )
118
+ else:
119
+ widget_dict.append({"text": instance_prompt})
120
+ embeddings_filename = f"{repo_folder}_emb"
121
+ instance_prompt_webui = re.sub(r"<s\d+>", "", re.sub(r"<s\d+>", embeddings_filename, instance_prompt, count=1))
122
+ ti_keys = ", ".join(f'"{match}"' for match in re.findall(r"<s\d+>", instance_prompt))
123
+ if instance_prompt_webui != embeddings_filename:
124
+ instance_prompt_sentence = f"For example, `{instance_prompt_webui}`"
125
+ else:
126
+ instance_prompt_sentence = ""
127
+ trigger_str = f"You should use {instance_prompt} to trigger the image generation."
128
+ diffusers_imports_pivotal = ""
129
+ diffusers_example_pivotal = ""
130
+ webui_example_pivotal = ""
131
+ if train_text_encoder_ti:
132
+ trigger_str = (
133
+ "To trigger image generation of the trained concept (or concepts), replace each concept identifier "
134
+ "in your prompt with the new inserted tokens:\n"
135
+ )
136
+ diffusers_imports_pivotal = """from huggingface_hub import hf_hub_download
137
+ from safetensors.torch import load_file
138
+ """
139
+ diffusers_example_pivotal = f"""embedding_path = hf_hub_download(repo_id='{repo_id}', filename='{embeddings_filename}.safetensors', repo_type="model")
140
+ state_dict = load_file(embedding_path)
141
+ pipeline.load_textual_inversion(state_dict["clip_l"], token=[{ti_keys}], text_encoder=pipeline.text_encoder, tokenizer=pipeline.tokenizer)
142
+ """
143
+ webui_example_pivotal = f"""- *Embeddings*: download **[`{embeddings_filename}.safetensors` here 💾](/{repo_id}/blob/main/{embeddings_filename}.safetensors)**.
144
+ - Place it in your `embeddings` folder
145
+ - Use it by adding `{embeddings_filename}` to your prompt. {instance_prompt_sentence}
146
+ (you need both the LoRA and the embeddings as they were trained together for this LoRA)
147
+ """
148
+ if token_abstraction_dict:
149
+ for key, value in token_abstraction_dict.items():
150
+ tokens = "".join(value)
151
+ trigger_str += f"""
152
+ to trigger concept `{key}` → use `{tokens}` in your prompt \n
153
+ """
154
+ model_description = f"""
155
+ # SD1.5 LoRA DreamBooth - {repo_id}
156
+
157
+ <Gallery />
158
+
159
+ ## Model description
160
+
161
+ ### These are {repo_id} LoRA adaptation weights for {base_model}.
162
+
163
+ ## Download model
164
+
165
+ ### Use it with UIs such as AUTOMATIC1111, Comfy UI, SD.Next, Invoke
166
+
167
+ - **LoRA**: download **[`{repo_folder}.safetensors` here 💾](/{repo_id}/blob/main/{repo_folder}.safetensors)**.
168
+ - Place it in your `models/Lora` folder.
169
+ - On AUTOMATIC1111, load the LoRA by adding `<lora:{repo_folder}:1>` to your prompt. On ComfyUI just [load it as a regular LoRA](https://comfyanonymous.github.io/ComfyUI_examples/lora/).
170
+ {webui_example_pivotal}
171
+
172
+ ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
173
+
174
+ ```py
175
+ from diffusers import AutoPipelineForText2Image
176
+ import torch
177
+ {diffusers_imports_pivotal}
178
+ pipeline = AutoPipelineForText2Image.from_pretrained('stable-diffusion-v1-5/stable-diffusion-v1-5', torch_dtype=torch.float16).to('cuda')
179
+ pipeline.load_lora_weights('{repo_id}', weight_name='pytorch_lora_weights.safetensors')
180
+ {diffusers_example_pivotal}
181
+ image = pipeline('{validation_prompt if validation_prompt else instance_prompt}').images[0]
182
+ ```
183
+
184
+ For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
185
+
186
+ ## Trigger words
187
+
188
+ {trigger_str}
189
+
190
+ ## Details
191
+ All [Files & versions](/{repo_id}/tree/main).
192
+
193
+ The weights were trained using [🧨 diffusers Advanced Dreambooth Training Script](https://github.com/huggingface/diffusers/blob/main/examples/advanced_diffusion_training/train_dreambooth_lora_sd15_advanced.py).
194
+
195
+ LoRA for the text encoder was enabled: {train_text_encoder}.
196
+
197
+ Pivotal tuning was enabled: {train_text_encoder_ti}.
198
+
199
+ Special VAE used for training: {vae_path}.
200
+
201
+ """
202
+ model_card = load_or_create_model_card(
203
+ repo_id_or_path=repo_id,
204
+ from_training=True,
205
+ license="openrail++",
206
+ base_model=base_model,
207
+ prompt=instance_prompt,
208
+ model_description=model_description,
209
+ inference=True,
210
+ widget=widget_dict,
211
+ )
212
+
213
+ tags = [
214
+ "text-to-image",
215
+ "diffusers",
216
+ "diffusers-training",
217
+ lora,
218
+ "template:sd-lora",
219
+ "stable-diffusion",
220
+ "stable-diffusion-diffusers",
221
+ ]
222
+ model_card = populate_model_card(model_card, tags=tags)
223
+
224
+ model_card.save(os.path.join(repo_folder, "README.md"))
225
+
226
+
227
+ def import_model_class_from_model_name_or_path(
228
+ pretrained_model_name_or_path: str, revision: str, subfolder: str = "text_encoder"
229
+ ):
230
+ text_encoder_config = PretrainedConfig.from_pretrained(
231
+ pretrained_model_name_or_path, subfolder=subfolder, revision=revision
232
+ )
233
+ model_class = text_encoder_config.architectures[0]
234
+
235
+ if model_class == "CLIPTextModel":
236
+ from transformers import CLIPTextModel
237
+
238
+ return CLIPTextModel
239
+ elif model_class == "CLIPTextModelWithProjection":
240
+ from transformers import CLIPTextModelWithProjection
241
+
242
+ return CLIPTextModelWithProjection
243
+ else:
244
+ raise ValueError(f"{model_class} is not supported.")
245
+
246
+
247
+ def parse_args(input_args=None):
248
+ parser = argparse.ArgumentParser(description="Simple example of a training script.")
249
+ parser.add_argument(
250
+ "--pretrained_model_name_or_path",
251
+ type=str,
252
+ default=None,
253
+ required=True,
254
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
255
+ )
256
+ parser.add_argument(
257
+ "--pretrained_vae_model_name_or_path",
258
+ type=str,
259
+ default=None,
260
+ help="Path to pretrained VAE model with better numerical stability. More details: https://github.com/huggingface/diffusers/pull/4038.",
261
+ )
262
+ parser.add_argument(
263
+ "--revision",
264
+ type=str,
265
+ default=None,
266
+ required=False,
267
+ help="Revision of pretrained model identifier from huggingface.co/models.",
268
+ )
269
+ parser.add_argument(
270
+ "--variant",
271
+ type=str,
272
+ default=None,
273
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
274
+ )
275
+ parser.add_argument(
276
+ "--dataset_name",
277
+ type=str,
278
+ default=None,
279
+ help=(
280
+ "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
281
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
282
+ " or to a folder containing files that 🤗 Datasets can understand. To load the custom captions, the training set directory needs to follow the structure of a "
283
+ "datasets ImageFolder, containing both the images and the corresponding caption for each image. see: "
284
+ "https://huggingface.co/docs/datasets/image_dataset for more information"
285
+ ),
286
+ )
287
+ parser.add_argument(
288
+ "--dataset_config_name",
289
+ type=str,
290
+ default=None,
291
+ help="The config of the Dataset. In some cases, a dataset may have more than one configuration (for example "
292
+ "if it contains different subsets of data within, and you only wish to load a specific subset - in that case specify the desired configuration using --dataset_config_name. Leave as "
293
+ "None if there's only one config.",
294
+ )
295
+ parser.add_argument(
296
+ "--instance_data_dir",
297
+ type=str,
298
+ default=None,
299
+ help="A path to local folder containing the training data of instance images. Specify this arg instead of "
300
+ "--dataset_name if you wish to train using a local folder without custom captions. If you wish to train with custom captions please specify "
301
+ "--dataset_name instead.",
302
+ )
303
+
304
+ parser.add_argument(
305
+ "--cache_dir",
306
+ type=str,
307
+ default=None,
308
+ help="The directory where the downloaded models and datasets will be stored.",
309
+ )
310
+
311
+ parser.add_argument(
312
+ "--image_column",
313
+ type=str,
314
+ default="image",
315
+ help="The column of the dataset containing the target image. By "
316
+ "default, the standard Image Dataset maps out 'file_name' "
317
+ "to 'image'.",
318
+ )
319
+ parser.add_argument(
320
+ "--caption_column",
321
+ type=str,
322
+ default=None,
323
+ help="The column of the dataset containing the instance prompt for each image",
324
+ )
325
+
326
+ parser.add_argument("--repeats", type=int, default=1, help="How many times to repeat the training data.")
327
+
328
+ parser.add_argument(
329
+ "--class_data_dir",
330
+ type=str,
331
+ default=None,
332
+ required=False,
333
+ help="A folder containing the training data of class images.",
334
+ )
335
+ parser.add_argument(
336
+ "--instance_prompt",
337
+ type=str,
338
+ default=None,
339
+ required=True,
340
+ help="The prompt with identifier specifying the instance, e.g. 'photo of a TOK dog', 'in the style of TOK'",
341
+ )
342
+ parser.add_argument(
343
+ "--token_abstraction",
344
+ type=str,
345
+ default="TOK",
346
+ help="identifier specifying the instance (or instances) as used in instance_prompt, validation prompt, "
347
+ "captions - e.g. TOK. To use multiple identifiers, please specify them in a comma separated string - e.g. "
348
+ "'TOK,TOK2,TOK3' etc.",
349
+ )
350
+
351
+ parser.add_argument(
352
+ "--num_new_tokens_per_abstraction",
353
+ type=int,
354
+ default=2,
355
+ help="number of new tokens inserted to the tokenizers per token_abstraction identifier when "
356
+ "--train_text_encoder_ti = True. By default, each --token_abstraction (e.g. TOK) is mapped to 2 new "
357
+ "tokens - <si><si+1> ",
358
+ )
359
+
360
+ parser.add_argument(
361
+ "--class_prompt",
362
+ type=str,
363
+ default=None,
364
+ help="The prompt to specify images in the same class as provided instance images.",
365
+ )
366
+ parser.add_argument(
367
+ "--validation_prompt",
368
+ type=str,
369
+ default=None,
370
+ help="A prompt that is used during validation to verify that the model is learning.",
371
+ )
372
+ parser.add_argument(
373
+ "--num_validation_images",
374
+ type=int,
375
+ default=4,
376
+ help="Number of images that should be generated during validation with `validation_prompt`.",
377
+ )
378
+ parser.add_argument(
379
+ "--validation_epochs",
380
+ type=int,
381
+ default=50,
382
+ help=(
383
+ "Run dreambooth validation every X epochs. Dreambooth validation consists of running the prompt"
384
+ " `args.validation_prompt` multiple times: `args.num_validation_images`."
385
+ ),
386
+ )
387
+ parser.add_argument(
388
+ "--with_prior_preservation",
389
+ default=False,
390
+ action="store_true",
391
+ help="Flag to add prior preservation loss.",
392
+ )
393
+ parser.add_argument("--prior_loss_weight", type=float, default=1.0, help="The weight of prior preservation loss.")
394
+ parser.add_argument(
395
+ "--num_class_images",
396
+ type=int,
397
+ default=100,
398
+ help=(
399
+ "Minimal class images for prior preservation loss. If there are not enough images already present in"
400
+ " class_data_dir, additional images will be sampled with class_prompt."
401
+ ),
402
+ )
403
+ parser.add_argument(
404
+ "--output_dir",
405
+ type=str,
406
+ default="lora-dreambooth-model",
407
+ help="The output directory where the model predictions and checkpoints will be written.",
408
+ )
409
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
410
+ parser.add_argument(
411
+ "--resolution",
412
+ type=int,
413
+ default=512,
414
+ help=(
415
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
416
+ " resolution"
417
+ ),
418
+ )
419
+ parser.add_argument(
420
+ "--center_crop",
421
+ default=False,
422
+ action="store_true",
423
+ help=(
424
+ "Whether to center crop the input images to the resolution. If not set, the images will be randomly"
425
+ " cropped. The images will be resized to the resolution first before cropping."
426
+ ),
427
+ )
428
+ parser.add_argument(
429
+ "--train_text_encoder",
430
+ action="store_true",
431
+ help="Whether to train the text encoder. If set, the text encoder should be float32 precision.",
432
+ )
433
+ parser.add_argument(
434
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
435
+ )
436
+ parser.add_argument(
437
+ "--sample_batch_size", type=int, default=4, help="Batch size (per device) for sampling images."
438
+ )
439
+ parser.add_argument("--num_train_epochs", type=int, default=1)
440
+ parser.add_argument(
441
+ "--max_train_steps",
442
+ type=int,
443
+ default=None,
444
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
445
+ )
446
+ parser.add_argument(
447
+ "--checkpointing_steps",
448
+ type=int,
449
+ default=500,
450
+ help=(
451
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
452
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
453
+ " training using `--resume_from_checkpoint`."
454
+ ),
455
+ )
456
+ parser.add_argument(
457
+ "--checkpoints_total_limit",
458
+ type=int,
459
+ default=None,
460
+ help=("Max number of checkpoints to store."),
461
+ )
462
+ parser.add_argument(
463
+ "--resume_from_checkpoint",
464
+ type=str,
465
+ default=None,
466
+ help=(
467
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
468
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
469
+ ),
470
+ )
471
+ parser.add_argument(
472
+ "--gradient_accumulation_steps",
473
+ type=int,
474
+ default=1,
475
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
476
+ )
477
+ parser.add_argument(
478
+ "--gradient_checkpointing",
479
+ action="store_true",
480
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
481
+ )
482
+ parser.add_argument(
483
+ "--learning_rate",
484
+ type=float,
485
+ default=1e-4,
486
+ help="Initial learning rate (after the potential warmup period) to use.",
487
+ )
488
+
489
+ parser.add_argument(
490
+ "--text_encoder_lr",
491
+ type=float,
492
+ default=5e-6,
493
+ help="Text encoder learning rate to use.",
494
+ )
495
+ parser.add_argument(
496
+ "--scale_lr",
497
+ action="store_true",
498
+ default=False,
499
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
500
+ )
501
+ parser.add_argument(
502
+ "--lr_scheduler",
503
+ type=str,
504
+ default="constant",
505
+ help=(
506
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
507
+ ' "constant", "constant_with_warmup"]'
508
+ ),
509
+ )
510
+
511
+ parser.add_argument(
512
+ "--snr_gamma",
513
+ type=float,
514
+ default=None,
515
+ help="SNR weighting gamma to be used if rebalancing the loss. Recommended value is 5.0. "
516
+ "More details here: https://huggingface.co/papers/2303.09556.",
517
+ )
518
+ parser.add_argument(
519
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
520
+ )
521
+ parser.add_argument(
522
+ "--lr_num_cycles",
523
+ type=int,
524
+ default=1,
525
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
526
+ )
527
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
528
+ parser.add_argument(
529
+ "--dataloader_num_workers",
530
+ type=int,
531
+ default=0,
532
+ help=(
533
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
534
+ ),
535
+ )
536
+
537
+ parser.add_argument(
538
+ "--train_text_encoder_ti",
539
+ action="store_true",
540
+ help=("Whether to use textual inversion"),
541
+ )
542
+
543
+ parser.add_argument(
544
+ "--train_text_encoder_ti_frac",
545
+ type=float,
546
+ default=0.5,
547
+ help=("The percentage of epochs to perform textual inversion"),
548
+ )
549
+
550
+ parser.add_argument(
551
+ "--train_text_encoder_frac",
552
+ type=float,
553
+ default=1.0,
554
+ help=("The percentage of epochs to perform text encoder tuning"),
555
+ )
556
+
557
+ parser.add_argument(
558
+ "--optimizer",
559
+ type=str,
560
+ default="adamW",
561
+ help=('The optimizer type to use. Choose between ["AdamW", "prodigy"]'),
562
+ )
563
+
564
+ parser.add_argument(
565
+ "--use_8bit_adam",
566
+ action="store_true",
567
+ help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
568
+ )
569
+
570
+ parser.add_argument(
571
+ "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
572
+ )
573
+ parser.add_argument(
574
+ "--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam and Prodigy optimizers."
575
+ )
576
+ parser.add_argument(
577
+ "--prodigy_beta3",
578
+ type=float,
579
+ default=None,
580
+ help="coefficients for computing the Prodigy stepsize using running averages. If set to None, "
581
+ "uses the value of square root of beta2. Ignored if optimizer is adamW",
582
+ )
583
+ parser.add_argument("--prodigy_decouple", type=bool, default=True, help="Use AdamW style decoupled weight decay")
584
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
585
+ parser.add_argument(
586
+ "--adam_weight_decay_text_encoder", type=float, default=None, help="Weight decay to use for text_encoder"
587
+ )
588
+
589
+ parser.add_argument(
590
+ "--adam_epsilon",
591
+ type=float,
592
+ default=1e-08,
593
+ help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
594
+ )
595
+
596
+ parser.add_argument(
597
+ "--prodigy_use_bias_correction",
598
+ type=bool,
599
+ default=True,
600
+ help="Turn on Adam's bias correction. True by default. Ignored if optimizer is adamW",
601
+ )
602
+ parser.add_argument(
603
+ "--prodigy_safeguard_warmup",
604
+ type=bool,
605
+ default=True,
606
+ help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage. True by default. "
607
+ "Ignored if optimizer is adamW",
608
+ )
609
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
610
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
611
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
612
+ parser.add_argument(
613
+ "--hub_model_id",
614
+ type=str,
615
+ default=None,
616
+ help="The name of the repository to keep in sync with the local `output_dir`.",
617
+ )
618
+ parser.add_argument(
619
+ "--logging_dir",
620
+ type=str,
621
+ default="logs",
622
+ help=(
623
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
624
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
625
+ ),
626
+ )
627
+ parser.add_argument(
628
+ "--allow_tf32",
629
+ action="store_true",
630
+ help=(
631
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
632
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
633
+ ),
634
+ )
635
+ parser.add_argument(
636
+ "--report_to",
637
+ type=str,
638
+ default="tensorboard",
639
+ help=(
640
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
641
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
642
+ ),
643
+ )
644
+ parser.add_argument(
645
+ "--mixed_precision",
646
+ type=str,
647
+ default=None,
648
+ choices=["no", "fp16", "bf16"],
649
+ help=(
650
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
651
+ " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
652
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
653
+ ),
654
+ )
655
+ parser.add_argument(
656
+ "--prior_generation_precision",
657
+ type=str,
658
+ default=None,
659
+ choices=["no", "fp32", "fp16", "bf16"],
660
+ help=(
661
+ "Choose prior generation precision between fp32, fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
662
+ " 1.10 and an Nvidia Ampere GPU. Defaults to fp16 if a GPU is available, else fp32."
663
+ ),
664
+ )
665
+ parser.add_argument("--local_rank", type=int, default=-1, help="For distributed training: local_rank")
666
+ parser.add_argument(
667
+ "--enable_xformers_memory_efficient_attention", action="store_true", help="Whether or not to use xformers."
668
+ )
669
+ parser.add_argument("--noise_offset", type=float, default=0, help="The scale of noise offset.")
670
+ parser.add_argument(
671
+ "--rank",
672
+ type=int,
673
+ default=4,
674
+ help=("The dimension of the LoRA update matrices."),
675
+ )
676
+ parser.add_argument("--lora_dropout", type=float, default=0.0, help="Dropout probability for LoRA layers")
677
+
678
+ parser.add_argument(
679
+ "--use_dora",
680
+ action="store_true",
681
+ default=False,
682
+ help=(
683
+ "Whether to train a DoRA as proposed in- DoRA: Weight-Decomposed Low-Rank Adaptation https://huggingface.co/papers/2402.09353. "
684
+ "Note: to use DoRA you need to install peft from main, `pip install git+https://github.com/huggingface/peft.git`"
685
+ ),
686
+ )
687
+ parser.add_argument(
688
+ "--cache_latents",
689
+ action="store_true",
690
+ default=False,
691
+ help="Cache the VAE latents",
692
+ )
693
+ parser.add_argument(
694
+ "--image_interpolation_mode",
695
+ type=str,
696
+ default="lanczos",
697
+ choices=[
698
+ f.lower() for f in dir(transforms.InterpolationMode) if not f.startswith("__") and not f.endswith("__")
699
+ ],
700
+ help="The image interpolation method to use for resizing images.",
701
+ )
702
+
703
+ if input_args is not None:
704
+ args = parser.parse_args(input_args)
705
+ else:
706
+ args = parser.parse_args()
707
+
708
+ if args.dataset_name is None and args.instance_data_dir is None:
709
+ raise ValueError("Specify either `--dataset_name` or `--instance_data_dir`")
710
+
711
+ if args.dataset_name is not None and args.instance_data_dir is not None:
712
+ raise ValueError("Specify only one of `--dataset_name` or `--instance_data_dir`")
713
+
714
+ if args.train_text_encoder and args.train_text_encoder_ti:
715
+ raise ValueError(
716
+ "Specify only one of `--train_text_encoder` or `--train_text_encoder_ti`. "
717
+ "For full LoRA text encoder training check --train_text_encoder, for textual "
718
+ "inversion training check `--train_text_encoder_ti`"
719
+ )
720
+
721
+ env_local_rank = int(os.environ.get("LOCAL_RANK", -1))
722
+ if env_local_rank != -1 and env_local_rank != args.local_rank:
723
+ args.local_rank = env_local_rank
724
+
725
+ if args.with_prior_preservation:
726
+ if args.class_data_dir is None:
727
+ raise ValueError("You must specify a data directory for class images.")
728
+ if args.class_prompt is None:
729
+ raise ValueError("You must specify prompt for class images.")
730
+ else:
731
+ # logger is not available yet
732
+ if args.class_data_dir is not None:
733
+ warnings.warn("You need not use --class_data_dir without --with_prior_preservation.")
734
+ if args.class_prompt is not None:
735
+ warnings.warn("You need not use --class_prompt without --with_prior_preservation.")
736
+
737
+ return args
738
+
739
+
740
+ # Taken from https://github.com/replicate/cog-sdxl/blob/main/dataset_and_utils.py
741
+ class TokenEmbeddingsHandler:
742
+ def __init__(self, text_encoders, tokenizers):
743
+ self.text_encoders = text_encoders
744
+ self.tokenizers = tokenizers
745
+
746
+ self.train_ids: Optional[torch.Tensor] = None
747
+ self.inserting_toks: Optional[List[str]] = None
748
+ self.embeddings_settings = {}
749
+
750
+ def initialize_new_tokens(self, inserting_toks: List[str]):
751
+ idx = 0
752
+ for tokenizer, text_encoder in zip(self.tokenizers, self.text_encoders):
753
+ assert isinstance(inserting_toks, list), "inserting_toks should be a list of strings."
754
+ assert all(isinstance(tok, str) for tok in inserting_toks), (
755
+ "All elements in inserting_toks should be strings."
756
+ )
757
+
758
+ self.inserting_toks = inserting_toks
759
+ special_tokens_dict = {"additional_special_tokens": self.inserting_toks}
760
+ tokenizer.add_special_tokens(special_tokens_dict)
761
+ text_encoder.resize_token_embeddings(len(tokenizer))
762
+
763
+ self.train_ids = tokenizer.convert_tokens_to_ids(self.inserting_toks)
764
+
765
+ # random initialization of new tokens
766
+ std_token_embedding = text_encoder.text_model.embeddings.token_embedding.weight.data.std()
767
+
768
+ print(f"{idx} text encoder's std_token_embedding: {std_token_embedding}")
769
+
770
+ text_encoder.text_model.embeddings.token_embedding.weight.data[self.train_ids] = (
771
+ torch.randn(len(self.train_ids), text_encoder.text_model.config.hidden_size)
772
+ .to(device=self.device)
773
+ .to(dtype=self.dtype)
774
+ * std_token_embedding
775
+ )
776
+ self.embeddings_settings[f"original_embeddings_{idx}"] = (
777
+ text_encoder.text_model.embeddings.token_embedding.weight.data.clone()
778
+ )
779
+ self.embeddings_settings[f"std_token_embedding_{idx}"] = std_token_embedding
780
+
781
+ inu = torch.ones((len(tokenizer),), dtype=torch.bool)
782
+ inu[self.train_ids] = False
783
+
784
+ self.embeddings_settings[f"index_no_updates_{idx}"] = inu
785
+
786
+ print(self.embeddings_settings[f"index_no_updates_{idx}"].shape)
787
+
788
+ idx += 1
789
+
790
+ # Copied from train_dreambooth_lora_sdxl_advanced.py
791
+ def save_embeddings(self, file_path: str):
792
+ assert self.train_ids is not None, "Initialize new tokens before saving embeddings."
793
+ tensors = {}
794
+ # text_encoder_0 - CLIP ViT-L/14, text_encoder_1 - CLIP ViT-G/14 - TODO - change for sd
795
+ idx_to_text_encoder_name = {0: "clip_l", 1: "clip_g"}
796
+ for idx, text_encoder in enumerate(self.text_encoders):
797
+ assert text_encoder.text_model.embeddings.token_embedding.weight.data.shape[0] == len(
798
+ self.tokenizers[0]
799
+ ), "Tokenizers should be the same."
800
+ new_token_embeddings = text_encoder.text_model.embeddings.token_embedding.weight.data[self.train_ids]
801
+
802
+ # New tokens for each text encoder are saved under "clip_l" (for text_encoder 0), "clip_g" (for
803
+ # text_encoder 1) to keep compatible with the ecosystem.
804
+ # Note: When loading with diffusers, any name can work - simply specify in inference
805
+ tensors[idx_to_text_encoder_name[idx]] = new_token_embeddings
806
+ # tensors[f"text_encoders_{idx}"] = new_token_embeddings
807
+
808
+ save_file(tensors, file_path)
809
+
810
+ @property
811
+ def dtype(self):
812
+ return self.text_encoders[0].dtype
813
+
814
+ @property
815
+ def device(self):
816
+ return self.text_encoders[0].device
817
+
818
+ @torch.no_grad()
819
+ def retract_embeddings(self):
820
+ for idx, text_encoder in enumerate(self.text_encoders):
821
+ index_no_updates = self.embeddings_settings[f"index_no_updates_{idx}"]
822
+ text_encoder.text_model.embeddings.token_embedding.weight.data[index_no_updates] = (
823
+ self.embeddings_settings[f"original_embeddings_{idx}"][index_no_updates]
824
+ .to(device=text_encoder.device)
825
+ .to(dtype=text_encoder.dtype)
826
+ )
827
+
828
+ # for the parts that were updated, we need to normalize them
829
+ # to have the same std as before
830
+ std_token_embedding = self.embeddings_settings[f"std_token_embedding_{idx}"]
831
+
832
+ index_updates = ~index_no_updates
833
+ new_embeddings = text_encoder.text_model.embeddings.token_embedding.weight.data[index_updates]
834
+ off_ratio = std_token_embedding / new_embeddings.std()
835
+
836
+ new_embeddings = new_embeddings * (off_ratio**0.1)
837
+ text_encoder.text_model.embeddings.token_embedding.weight.data[index_updates] = new_embeddings
838
+
839
+
840
+ class DreamBoothDataset(Dataset):
841
+ """
842
+ A dataset to prepare the instance and class images with the prompts for fine-tuning the model.
843
+ It pre-processes the images.
844
+ """
845
+
846
+ def __init__(
847
+ self,
848
+ instance_data_root,
849
+ instance_prompt,
850
+ class_prompt,
851
+ dataset_name,
852
+ dataset_config_name,
853
+ cache_dir,
854
+ image_column,
855
+ caption_column,
856
+ train_text_encoder_ti,
857
+ class_data_root=None,
858
+ class_num=None,
859
+ token_abstraction_dict=None, # token mapping for textual inversion
860
+ size=1024,
861
+ repeats=1,
862
+ center_crop=False,
863
+ ):
864
+ self.size = size
865
+ self.center_crop = center_crop
866
+
867
+ self.instance_prompt = instance_prompt
868
+ self.custom_instance_prompts = None
869
+ self.class_prompt = class_prompt
870
+ self.token_abstraction_dict = token_abstraction_dict
871
+ self.train_text_encoder_ti = train_text_encoder_ti
872
+ # if --dataset_name is provided or a metadata jsonl file is provided in the local --instance_data directory,
873
+ # we load the training data using load_dataset
874
+ if dataset_name is not None:
875
+ try:
876
+ from datasets import load_dataset
877
+ except ImportError:
878
+ raise ImportError(
879
+ "You are trying to load your data using the datasets library. If you wish to train using custom "
880
+ "captions please install the datasets library: `pip install datasets`. If you wish to load a "
881
+ "local folder containing images only, specify --instance_data_dir instead."
882
+ )
883
+ # Downloading and loading a dataset from the hub.
884
+ # See more about loading custom images at
885
+ # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
886
+ dataset = load_dataset(
887
+ dataset_name,
888
+ dataset_config_name,
889
+ cache_dir=cache_dir,
890
+ )
891
+ # Preprocessing the datasets.
892
+ column_names = dataset["train"].column_names
893
+
894
+ # 6. Get the column names for input/target.
895
+ if image_column is None:
896
+ image_column = column_names[0]
897
+ logger.info(f"image column defaulting to {image_column}")
898
+ else:
899
+ if image_column not in column_names:
900
+ raise ValueError(
901
+ f"`--image_column` value '{image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
902
+ )
903
+ instance_images = dataset["train"][image_column]
904
+
905
+ if caption_column is None:
906
+ logger.info(
907
+ "No caption column provided, defaulting to instance_prompt for all images. If your dataset "
908
+ "contains captions/prompts for the images, make sure to specify the "
909
+ "column as --caption_column"
910
+ )
911
+ self.custom_instance_prompts = None
912
+ else:
913
+ if caption_column not in column_names:
914
+ raise ValueError(
915
+ f"`--caption_column` value '{caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
916
+ )
917
+ custom_instance_prompts = dataset["train"][caption_column]
918
+ # create final list of captions according to --repeats
919
+ self.custom_instance_prompts = []
920
+ for caption in custom_instance_prompts:
921
+ self.custom_instance_prompts.extend(itertools.repeat(caption, repeats))
922
+ else:
923
+ self.instance_data_root = Path(instance_data_root)
924
+ if not self.instance_data_root.exists():
925
+ raise ValueError("Instance images root doesn't exist.")
926
+
927
+ instance_images = [Image.open(path) for path in list(Path(instance_data_root).iterdir())]
928
+ self.custom_instance_prompts = None
929
+
930
+ self.instance_images = []
931
+ for img in instance_images:
932
+ self.instance_images.extend(itertools.repeat(img, repeats))
933
+ self.num_instance_images = len(self.instance_images)
934
+ self._length = self.num_instance_images
935
+
936
+ interpolation = getattr(transforms.InterpolationMode, args.image_interpolation_mode.upper(), None)
937
+ if interpolation is None:
938
+ raise ValueError(f"Unsupported interpolation mode {interpolation=}.")
939
+
940
+ if class_data_root is not None:
941
+ self.class_data_root = Path(class_data_root)
942
+ self.class_data_root.mkdir(parents=True, exist_ok=True)
943
+ self.class_images_path = list(self.class_data_root.iterdir())
944
+ if class_num is not None:
945
+ self.num_class_images = min(len(self.class_images_path), class_num)
946
+ else:
947
+ self.num_class_images = len(self.class_images_path)
948
+ self._length = max(self.num_class_images, self.num_instance_images)
949
+ else:
950
+ self.class_data_root = None
951
+
952
+ self.image_transforms = transforms.Compose(
953
+ [
954
+ transforms.Resize(size, interpolation=interpolation),
955
+ transforms.CenterCrop(size) if center_crop else transforms.RandomCrop(size),
956
+ transforms.ToTensor(),
957
+ transforms.Normalize([0.5], [0.5]),
958
+ ]
959
+ )
960
+
961
+ def __len__(self):
962
+ return self._length
963
+
964
+ def __getitem__(self, index):
965
+ example = {}
966
+ instance_image = self.instance_images[index % self.num_instance_images]
967
+ instance_image = exif_transpose(instance_image)
968
+
969
+ if not instance_image.mode == "RGB":
970
+ instance_image = instance_image.convert("RGB")
971
+ example["instance_images"] = self.image_transforms(instance_image)
972
+
973
+ if self.custom_instance_prompts:
974
+ caption = self.custom_instance_prompts[index % self.num_instance_images]
975
+ if caption:
976
+ if self.train_text_encoder_ti:
977
+ # replace instances of --token_abstraction in caption with the new tokens: "<si><si+1>" etc.
978
+ for token_abs, token_replacement in self.token_abstraction_dict.items():
979
+ caption = caption.replace(token_abs, "".join(token_replacement))
980
+ example["instance_prompt"] = caption
981
+ else:
982
+ example["instance_prompt"] = self.instance_prompt
983
+
984
+ else: # custom prompts were provided, but length does not match size of image dataset
985
+ example["instance_prompt"] = self.instance_prompt
986
+
987
+ if self.class_data_root:
988
+ class_image = Image.open(self.class_images_path[index % self.num_class_images])
989
+ class_image = exif_transpose(class_image)
990
+
991
+ if not class_image.mode == "RGB":
992
+ class_image = class_image.convert("RGB")
993
+ example["class_images"] = self.image_transforms(class_image)
994
+ example["class_prompt"] = self.class_prompt
995
+
996
+ return example
997
+
998
+
999
+ def collate_fn(examples, with_prior_preservation=False):
1000
+ pixel_values = [example["instance_images"] for example in examples]
1001
+ prompts = [example["instance_prompt"] for example in examples]
1002
+
1003
+ # Concat class and instance examples for prior preservation.
1004
+ # We do this to avoid doing two forward passes.
1005
+ if with_prior_preservation:
1006
+ pixel_values += [example["class_images"] for example in examples]
1007
+ prompts += [example["class_prompt"] for example in examples]
1008
+
1009
+ pixel_values = torch.stack(pixel_values)
1010
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
1011
+
1012
+ batch = {"pixel_values": pixel_values, "prompts": prompts}
1013
+ return batch
1014
+
1015
+
1016
+ class PromptDataset(Dataset):
1017
+ """A simple dataset to prepare the prompts to generate class images on multiple GPUs."""
1018
+
1019
+ def __init__(self, prompt, num_samples):
1020
+ self.prompt = prompt
1021
+ self.num_samples = num_samples
1022
+
1023
+ def __len__(self):
1024
+ return self.num_samples
1025
+
1026
+ def __getitem__(self, index):
1027
+ example = {}
1028
+ example["prompt"] = self.prompt
1029
+ example["index"] = index
1030
+ return example
1031
+
1032
+
1033
+ def tokenize_prompt(tokenizer, prompt, add_special_tokens=False):
1034
+ text_inputs = tokenizer(
1035
+ prompt,
1036
+ padding="max_length",
1037
+ max_length=tokenizer.model_max_length,
1038
+ truncation=True,
1039
+ add_special_tokens=add_special_tokens,
1040
+ return_tensors="pt",
1041
+ )
1042
+ text_input_ids = text_inputs.input_ids
1043
+ return text_input_ids
1044
+
1045
+
1046
+ # Adapted from pipelines.StableDiffusionXLPipeline.encode_prompt
1047
+ def encode_prompt(text_encoders, tokenizers, prompt, text_input_ids_list=None):
1048
+ for i, text_encoder in enumerate(text_encoders):
1049
+ if tokenizers is not None:
1050
+ tokenizer = tokenizers[i]
1051
+ text_input_ids = tokenize_prompt(tokenizer, prompt)
1052
+ else:
1053
+ assert text_input_ids_list is not None
1054
+ text_input_ids = text_input_ids_list[i]
1055
+
1056
+ prompt_embeds = text_encoder(
1057
+ text_input_ids.to(text_encoder.device),
1058
+ output_hidden_states=True,
1059
+ )
1060
+
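+ # prompt_embeds[0] is the text encoder's last hidden state, which conditions the SD 1.5 unet via cross-attention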
1061
+ return prompt_embeds[0]
1062
+
1063
+
1064
+ def main(args):
1065
+ if args.report_to == "wandb" and args.hub_token is not None:
1066
+ raise ValueError(
1067
+ "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
1068
+ " Please use `hf auth login` to authenticate with the Hub."
1069
+ )
1070
+
1071
+ logging_dir = Path(args.output_dir, args.logging_dir)
1072
+
1073
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
1074
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
1075
+ accelerator = Accelerator(
1076
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
1077
+ mixed_precision=args.mixed_precision,
1078
+ log_with=args.report_to,
1079
+ project_config=accelerator_project_config,
1080
+ kwargs_handlers=[kwargs],
1081
+ )
1082
+
1083
+ if args.report_to == "wandb":
1084
+ if not is_wandb_available():
1085
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
1086
+ import wandb
1087
+
1088
+ # Make one log on every process with the configuration for debugging.
1089
+ logging.basicConfig(
1090
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
1091
+ datefmt="%m/%d/%Y %H:%M:%S",
1092
+ level=logging.INFO,
1093
+ )
1094
+ logger.info(accelerator.state, main_process_only=False)
1095
+ if accelerator.is_local_main_process:
1096
+ transformers.utils.logging.set_verbosity_warning()
1097
+ diffusers.utils.logging.set_verbosity_info()
1098
+ else:
1099
+ transformers.utils.logging.set_verbosity_error()
1100
+ diffusers.utils.logging.set_verbosity_error()
1101
+
1102
+ # If passed along, set the training seed now.
1103
+ if args.seed is not None:
1104
+ set_seed(args.seed)
1105
+
1106
+ # Generate class images if prior preservation is enabled.
1107
+ if args.with_prior_preservation:
1108
+ class_images_dir = Path(args.class_data_dir)
1109
+ if not class_images_dir.exists():
1110
+ class_images_dir.mkdir(parents=True)
1111
+ cur_class_images = len(list(class_images_dir.iterdir()))
1112
+
1113
+ if cur_class_images < args.num_class_images:
1114
+ torch_dtype = torch.float16 if accelerator.device.type == "cuda" else torch.float32
1115
+ if args.prior_generation_precision == "fp32":
1116
+ torch_dtype = torch.float32
1117
+ elif args.prior_generation_precision == "fp16":
1118
+ torch_dtype = torch.float16
1119
+ elif args.prior_generation_precision == "bf16":
1120
+ torch_dtype = torch.bfloat16
1121
+ pipeline = StableDiffusionPipeline.from_pretrained(
1122
+ args.pretrained_model_name_or_path,
1123
+ torch_dtype=torch_dtype,
1124
+ revision=args.revision,
1125
+ variant=args.variant,
1126
+ )
1127
+ pipeline.set_progress_bar_config(disable=True)
1128
+
1129
+ num_new_images = args.num_class_images - cur_class_images
1130
+ logger.info(f"Number of class images to sample: {num_new_images}.")
1131
+
1132
+ sample_dataset = PromptDataset(args.class_prompt, num_new_images)
1133
+ sample_dataloader = torch.utils.data.DataLoader(sample_dataset, batch_size=args.sample_batch_size)
1134
+
1135
+ sample_dataloader = accelerator.prepare(sample_dataloader)
1136
+ pipeline.to(accelerator.device)
1137
+
1138
+ for example in tqdm(
1139
+ sample_dataloader, desc="Generating class images", disable=not accelerator.is_local_main_process
1140
+ ):
1141
+ images = pipeline(example["prompt"]).images
1142
+
1143
+ for i, image in enumerate(images):
1144
+ hash_image = hashlib.sha1(image.tobytes()).hexdigest()
1145
+ image_filename = class_images_dir / f"{example['index'][i] + cur_class_images}-{hash_image}.jpg"
1146
+ image.save(image_filename)
1147
+
1148
+ del pipeline
1149
+ if torch.cuda.is_available():
1150
+ torch.cuda.empty_cache()
1151
+
1152
+ # Handle the repository creation
1153
+ if accelerator.is_main_process:
1154
+ if args.output_dir is not None:
1155
+ os.makedirs(args.output_dir, exist_ok=True)
1156
+
1157
+ model_id = args.hub_model_id or Path(args.output_dir).name
1158
+ repo_id = None
1159
+ if args.push_to_hub:
1160
+ repo_id = create_repo(repo_id=model_id, exist_ok=True, token=args.hub_token).repo_id
1161
+
1162
+ # Load the tokenizers
1163
+ tokenizer_one = AutoTokenizer.from_pretrained(
1164
+ args.pretrained_model_name_or_path,
1165
+ subfolder="tokenizer",
1166
+ revision=args.revision,
1167
+ variant=args.variant,
1168
+ use_fast=False,
1169
+ )
1170
+
1171
+ # import correct text encoder classes
1172
+ text_encoder_cls_one = import_model_class_from_model_name_or_path(
1173
+ args.pretrained_model_name_or_path, args.revision
1174
+ )
1175
+
1176
+ # Load scheduler and models
1177
+ noise_scheduler = DDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
1178
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
1179
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
1180
+ )
1181
+ vae_path = (
1182
+ args.pretrained_model_name_or_path
1183
+ if args.pretrained_vae_model_name_or_path is None
1184
+ else args.pretrained_vae_model_name_or_path
1185
+ )
1186
+ vae = AutoencoderKL.from_pretrained(
1187
+ vae_path,
1188
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
1189
+ revision=args.revision,
1190
+ variant=args.variant,
1191
+ )
1192
+ vae_scaling_factor = vae.config.scaling_factor
1193
+ unet = UNet2DConditionModel.from_pretrained(
1194
+ args.pretrained_model_name_or_path, subfolder="unet", revision=args.revision, variant=args.variant
1195
+ )
1196
+
1197
+ if args.train_text_encoder_ti:
1198
+ # we parse the provided token identifier (or identifiers) into a list, e.g. "TOK" -> ["TOK"], "TOK,
1199
+ # TOK2" -> ["TOK", "TOK2"] etc.
1200
+ token_abstraction_list = "".join(args.token_abstraction.split()).split(",")
1201
+ logger.info(f"list of token identifiers: {token_abstraction_list}")
1202
+
1203
+ token_abstraction_dict = {}
1204
+ token_idx = 0
1205
+ for i, token in enumerate(token_abstraction_list):
1206
+ token_abstraction_dict[token] = [
1207
+ f"<s{token_idx + i + j}>" for j in range(args.num_new_tokens_per_abstraction)
1208
+ ]
1209
+ token_idx += args.num_new_tokens_per_abstraction - 1
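+ # e.g. with 2 new tokens per abstraction, the first identifier maps to "<s0><s1>", the next to "<s2><s3>", and so on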
1210
+
1211
+ # replace instances of --token_abstraction in --instance_prompt with the new tokens: "<si><si+1>" etc.
1212
+ for token_abs, token_replacement in token_abstraction_dict.items():
1213
+ args.instance_prompt = args.instance_prompt.replace(token_abs, "".join(token_replacement))
1214
+ if args.with_prior_preservation:
1215
+ args.class_prompt = args.class_prompt.replace(token_abs, "".join(token_replacement))
1216
+
1217
+ # initialize the new tokens for textual inversion
1218
+ embedding_handler = TokenEmbeddingsHandler([text_encoder_one], [tokenizer_one])
1219
+ inserting_toks = []
1220
+ for new_tok in token_abstraction_dict.values():
1221
+ inserting_toks.extend(new_tok)
1222
+ embedding_handler.initialize_new_tokens(inserting_toks=inserting_toks)
1223
+
1224
+ # We only train the additional adapter LoRA layers
1225
+ vae.requires_grad_(False)
1226
+ text_encoder_one.requires_grad_(False)
1227
+ unet.requires_grad_(False)
1228
+
1229
+ # For mixed precision training we cast all non-trainable weights (vae, non-lora text_encoder and non-lora unet) to half-precision
1230
+ # as these weights are only used for inference, keeping weights in full precision is not required.
1231
+ weight_dtype = torch.float32
1232
+ if accelerator.mixed_precision == "fp16":
1233
+ weight_dtype = torch.float16
1234
+ elif accelerator.mixed_precision == "bf16":
1235
+ weight_dtype = torch.bfloat16
1236
+
1237
+ # Move unet, vae and text_encoder to device and cast to weight_dtype
1238
+ unet.to(accelerator.device, dtype=weight_dtype)
1239
+
1240
+ # The VAE is always in float32 to avoid NaN losses.
1241
+ vae.to(accelerator.device, dtype=torch.float32)
1242
+
1243
+ text_encoder_one.to(accelerator.device, dtype=weight_dtype)
1244
+
1245
+ if args.enable_xformers_memory_efficient_attention:
1246
+ if is_xformers_available():
1247
+ import xformers
1248
+
1249
+ xformers_version = version.parse(xformers.__version__)
1250
+ if xformers_version == version.parse("0.0.16"):
1251
+ logger.warning(
1252
+ "xFormers 0.0.16 cannot be used for training in some GPUs. If you observe problems during training, "
1253
+ "please update xFormers to at least 0.0.17. See https://huggingface.co/docs/diffusers/main/en/optimization/xformers for more details."
1254
+ )
1255
+ unet.enable_xformers_memory_efficient_attention()
1256
+ else:
1257
+ raise ValueError("xformers is not available. Make sure it is installed correctly")
1258
+
1259
+ if args.gradient_checkpointing:
1260
+ unet.enable_gradient_checkpointing()
1261
+ if args.train_text_encoder:
1262
+ text_encoder_one.gradient_checkpointing_enable()
1263
+
1264
+ # now we will add new LoRA weights to the attention layers
1265
+ unet_lora_config = LoraConfig(
1266
+ r=args.rank,
1267
+ lora_alpha=args.rank,
1268
+ lora_dropout=args.lora_dropout,
1269
+ use_dora=args.use_dora,
1270
+ init_lora_weights="gaussian",
1271
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
1272
+ )
1273
+ unet.add_adapter(unet_lora_config)
1274
+
1275
+ # The text encoder comes from 🤗 transformers, so we cannot directly modify it.
1276
+ # So, instead, we monkey-patch the forward calls of its attention-blocks.
1277
+ if args.train_text_encoder:
1278
+ text_lora_config = LoraConfig(
1279
+ r=args.rank,
1280
+ lora_alpha=args.rank,
1281
+ lora_dropout=args.lora_dropout,
1282
+ use_dora=args.use_dora,
1283
+ init_lora_weights="gaussian",
1284
+ target_modules=["q_proj", "k_proj", "v_proj", "out_proj"],
1285
+ )
1286
+ text_encoder_one.add_adapter(text_lora_config)
1287
+
1288
+ # if we use textual inversion, we freeze all parameters except for the token embeddings
1289
+ # in text encoder
1290
+ elif args.train_text_encoder_ti:
1291
+ text_lora_parameters_one = []
1292
+ for name, param in text_encoder_one.named_parameters():
1293
+ if "token_embedding" in name:
1294
+ # ensure that dtype is float32, even if rest of the model that isn't trained is loaded in fp16
1295
+ param = param.to(dtype=torch.float32)
1296
+ param.requires_grad = True
1297
+ text_lora_parameters_one.append(param)
1298
+ else:
1299
+ param.requires_grad = False
1300
+
1301
+ # Make sure the trainable params are in float32.
1302
+ if args.mixed_precision == "fp16":
1303
+ models = [unet]
1304
+ if args.train_text_encoder:
1305
+ models.extend([text_encoder_one])
1306
+ for model in models:
1307
+ for param in model.parameters():
1308
+ # only upcast trainable parameters (LoRA) into fp32
1309
+ if param.requires_grad:
1310
+ param.data = param.to(torch.float32)
1311
+
1312
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
1313
+ def save_model_hook(models, weights, output_dir):
1314
+ if accelerator.is_main_process:
1315
+ # there are only two options here. Either there are just the unet attn processor layers
1316
+ # or there are both the unet and text encoder attn layers
1317
+ unet_lora_layers_to_save = None
1318
+ text_encoder_one_lora_layers_to_save = None
1319
+
1320
+ for model in models:
1321
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
1322
+ unet_lora_layers_to_save = convert_state_dict_to_diffusers(get_peft_model_state_dict(model))
1323
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
1324
+ if args.train_text_encoder:
1325
+ text_encoder_one_lora_layers_to_save = convert_state_dict_to_diffusers(
1326
+ get_peft_model_state_dict(model)
1327
+ )
1328
+ else:
1329
+ raise ValueError(f"unexpected save model: {model.__class__}")
1330
+
1331
+ # make sure to pop weight so that corresponding model is not saved again
1332
+ weights.pop()
1333
+
1334
+ StableDiffusionPipeline.save_lora_weights(
1335
+ output_dir,
1336
+ unet_lora_layers=unet_lora_layers_to_save,
1337
+ text_encoder_lora_layers=text_encoder_one_lora_layers_to_save,
1338
+ )
1339
+ if args.train_text_encoder_ti:
1340
+ embedding_handler.save_embeddings(f"{args.output_dir}/{Path(args.output_dir).name}_emb.safetensors")
1341
+
1342
+ def load_model_hook(models, input_dir):
1343
+ unet_ = None
1344
+ text_encoder_one_ = None
1345
+
1346
+ while len(models) > 0:
1347
+ model = models.pop()
1348
+
1349
+ if isinstance(model, type(accelerator.unwrap_model(unet))):
1350
+ unet_ = model
1351
+ elif isinstance(model, type(accelerator.unwrap_model(text_encoder_one))):
1352
+ text_encoder_one_ = model
1353
+ else:
1354
+ raise ValueError(f"unexpected save model: {model.__class__}")
1355
+
1356
+ lora_state_dict, network_alphas = StableDiffusionPipeline.lora_state_dict(input_dir)
1357
+
1358
+ unet_state_dict = {f"{k.replace('unet.', '')}": v for k, v in lora_state_dict.items() if k.startswith("unet.")}
1359
+ unet_state_dict = convert_unet_state_dict_to_peft(unet_state_dict)
1360
+ incompatible_keys = set_peft_model_state_dict(unet_, unet_state_dict, adapter_name="default")
1361
+ if incompatible_keys is not None:
1362
+ # check only for unexpected keys
1363
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
1364
+ if unexpected_keys:
1365
+ logger.warning(
1366
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
1367
+ f" {unexpected_keys}. "
1368
+ )
1369
+
1370
+ if args.train_text_encoder:
1371
+ # Do we need to call `scale_lora_layers()` here?
1372
+ _set_state_dict_into_text_encoder(lora_state_dict, prefix="text_encoder.", text_encoder=text_encoder_one_)
1373
+
1374
+ _set_state_dict_into_text_encoder(
1375
+ lora_state_dict, prefix="text_encoder_2.", text_encoder=text_encoder_one_
1376
+ )
1377
+
1378
+ # Make sure the trainable params are in float32. This is again needed since the base models
1379
+ # are in `weight_dtype`. More details:
1380
+ # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
1381
+ if args.mixed_precision == "fp16":
1382
+ models = [unet_]
1383
+ if args.train_text_encoder:
1384
+ models.extend([text_encoder_one_])
1385
+ # only upcast trainable parameters (LoRA) into fp32
1386
+ cast_training_params(models)
1387
+ lora_state_dict, network_alphas = StableDiffusionLoraLoaderMixin.lora_state_dict(input_dir)
1388
+ StableDiffusionLoraLoaderMixin.load_lora_into_unet(lora_state_dict, network_alphas=network_alphas, unet=unet_)
1389
+
1390
+ text_encoder_state_dict = {k: v for k, v in lora_state_dict.items() if "text_encoder." in k}
1391
+ StableDiffusionLoraLoaderMixin.load_lora_into_text_encoder(
1392
+ text_encoder_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_one_
1393
+ )
1394
+
1395
+ accelerator.register_save_state_pre_hook(save_model_hook)
1396
+ accelerator.register_load_state_pre_hook(load_model_hook)
1397
+
1398
+ # Enable TF32 for faster training on Ampere GPUs,
1399
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
1400
+ if args.allow_tf32:
1401
+ torch.backends.cuda.matmul.allow_tf32 = True
1402
+
1403
+ if args.scale_lr:
1404
+ args.learning_rate = (
1405
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
1406
+ )
1407
+
1408
+ unet_lora_parameters = list(filter(lambda p: p.requires_grad, unet.parameters()))
1409
+
1410
+ if args.train_text_encoder:
1411
+ text_lora_parameters_one = list(filter(lambda p: p.requires_grad, text_encoder_one.parameters()))
1412
+
1413
+ # If neither --train_text_encoder nor --train_text_encoder_ti, text_encoders remain frozen during training
1414
+ freeze_text_encoder = not (args.train_text_encoder or args.train_text_encoder_ti)
1415
+
1416
+ # Optimization parameters
1417
+ unet_lora_parameters_with_lr = {"params": unet_lora_parameters, "lr": args.learning_rate}
1418
+ if not freeze_text_encoder:
1419
+ # different learning rate for text encoder and unet
1420
+ text_lora_parameters_one_with_lr = {
1421
+ "params": text_lora_parameters_one,
1422
+ "weight_decay": args.adam_weight_decay_text_encoder
1423
+ if args.adam_weight_decay_text_encoder
1424
+ else args.adam_weight_decay,
1425
+ "lr": args.text_encoder_lr if args.text_encoder_lr else args.learning_rate,
1426
+ }
1427
+ params_to_optimize = [unet_lora_parameters_with_lr, text_lora_parameters_one_with_lr]
1428
+ else:
1429
+ params_to_optimize = [unet_lora_parameters_with_lr]
1430
+
1431
+ # Optimizer creation
1432
+ if not (args.optimizer.lower() == "prodigy" or args.optimizer.lower() == "adamw"):
1433
+ logger.warning(
1434
+ f"Unsupported choice of optimizer: {args.optimizer}.Supported optimizers include [adamW, prodigy]."
1435
+ "Defaulting to adamW"
1436
+ )
1437
+ args.optimizer = "adamw"
1438
+
1439
+ if args.use_8bit_adam and not args.optimizer.lower() == "adamw":
1440
+ logger.warning(
1441
+ f"use_8bit_adam is ignored when optimizer is not set to 'AdamW'. Optimizer was "
1442
+ f"set to {args.optimizer.lower()}"
1443
+ )
1444
+
1445
+ if args.optimizer.lower() == "adamw":
1446
+ if args.use_8bit_adam:
1447
+ try:
1448
+ import bitsandbytes as bnb
1449
+ except ImportError:
1450
+ raise ImportError(
1451
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
1452
+ )
1453
+
1454
+ optimizer_class = bnb.optim.AdamW8bit
1455
+ else:
1456
+ optimizer_class = torch.optim.AdamW
1457
+
1458
+ optimizer = optimizer_class(
1459
+ params_to_optimize,
1460
+ betas=(args.adam_beta1, args.adam_beta2),
1461
+ weight_decay=args.adam_weight_decay,
1462
+ eps=args.adam_epsilon,
1463
+ )
1464
+
1465
+ if args.optimizer.lower() == "prodigy":
1466
+ try:
1467
+ import prodigyopt
1468
+ except ImportError:
1469
+ raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`")
1470
+
1471
+ optimizer_class = prodigyopt.Prodigy
1472
+
1473
+ if args.learning_rate <= 0.1:
1474
+ logger.warning(
1475
+ "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
1476
+ )
1477
+ if args.train_text_encoder and args.text_encoder_lr:
1478
+ logger.warning(
1479
+ f"Learning rates were provided both for the unet and the text encoder- e.g. text_encoder_lr:"
1480
+ f" {args.text_encoder_lr} and learning_rate: {args.learning_rate}. "
1481
+ f"When using prodigy only learning_rate is used as the initial learning rate."
1482
+ )
1483
+ # changes the learning rate of text_encoder_parameters_one to be
1484
+ # --learning_rate
1485
+ params_to_optimize[1]["lr"] = args.learning_rate
1486
+
1487
+ optimizer = optimizer_class(
1488
+ params_to_optimize,
1489
+ betas=(args.adam_beta1, args.adam_beta2),
1490
+ beta3=args.prodigy_beta3,
1491
+ weight_decay=args.adam_weight_decay,
1492
+ eps=args.adam_epsilon,
1493
+ decouple=args.prodigy_decouple,
1494
+ use_bias_correction=args.prodigy_use_bias_correction,
1495
+ safeguard_warmup=args.prodigy_safeguard_warmup,
1496
+ )
1497
+
1498
+ # Dataset and DataLoaders creation:
1499
+ train_dataset = DreamBoothDataset(
1500
+ instance_data_root=args.instance_data_dir,
1501
+ instance_prompt=args.instance_prompt,
1502
+ class_prompt=args.class_prompt,
1503
+ dataset_name=args.dataset_name,
1504
+ dataset_config_name=args.dataset_config_name,
1505
+ cache_dir=args.cache_dir,
1506
+ image_column=args.image_column,
1507
+ train_text_encoder_ti=args.train_text_encoder_ti,
1508
+ caption_column=args.caption_column,
1509
+ class_data_root=args.class_data_dir if args.with_prior_preservation else None,
1510
+ token_abstraction_dict=token_abstraction_dict if args.train_text_encoder_ti else None,
1511
+ class_num=args.num_class_images,
1512
+ size=args.resolution,
1513
+ repeats=args.repeats,
1514
+ center_crop=args.center_crop,
1515
+ )
1516
+
1517
+ train_dataloader = torch.utils.data.DataLoader(
1518
+ train_dataset,
1519
+ batch_size=args.train_batch_size,
1520
+ shuffle=True,
1521
+ collate_fn=lambda examples: collate_fn(examples, args.with_prior_preservation),
1522
+ num_workers=args.dataloader_num_workers,
1523
+ )
1524
+
1525
+ if not args.train_text_encoder:
1526
+ tokenizers = [tokenizer_one]
1527
+ text_encoders = [text_encoder_one]
1528
+
1529
+ def compute_text_embeddings(prompt, text_encoders, tokenizers):
1530
+ with torch.no_grad():
1531
+ prompt_embeds = encode_prompt(text_encoders, tokenizers, prompt)
1532
+ prompt_embeds = prompt_embeds.to(accelerator.device)
1533
+ return prompt_embeds
1534
+
1535
+ # If no type of tuning is done on the text_encoder and custom instance prompts are NOT
1536
+ # provided (i.e. the --instance_prompt is used for all images), we encode the instance prompt once to avoid
1537
+ # the redundant encoding.
1538
+ if freeze_text_encoder and not train_dataset.custom_instance_prompts:
1539
+ instance_prompt_hidden_states = compute_text_embeddings(args.instance_prompt, text_encoders, tokenizers)
1540
+
1541
+ # Handle class prompt for prior-preservation.
1542
+ if args.with_prior_preservation:
1543
+ if freeze_text_encoder:
1544
+ class_prompt_hidden_states = compute_text_embeddings(args.class_prompt, text_encoders, tokenizers)
1545
+
1546
+ # Clear the memory here
1547
+ if freeze_text_encoder and not train_dataset.custom_instance_prompts:
1548
+ del tokenizers, text_encoders
1549
+ gc.collect()
1550
+ torch.cuda.empty_cache()
1551
+
1552
+ # if --train_text_encoder_ti we need add_special_tokens to be True for textual inversion
1553
+ add_special_tokens = True if args.train_text_encoder_ti else False
1554
+
1555
+ if not train_dataset.custom_instance_prompts:
1556
+ if freeze_text_encoder:
1557
+ prompt_embeds = instance_prompt_hidden_states
1558
+ if args.with_prior_preservation:
1559
+ prompt_embeds = torch.cat([prompt_embeds, class_prompt_hidden_states], dim=0)
1560
+
1561
+ # if we're optimizing the text encoder (both if instance prompt is used for all images or custom prompts) we need to tokenize and encode the
1562
+ # batch prompts on all training steps
1563
+ else:
1564
+ tokens_one = tokenize_prompt(tokenizer_one, args.instance_prompt, add_special_tokens)
1565
+ if args.with_prior_preservation:
1566
+ class_tokens_one = tokenize_prompt(tokenizer_one, args.class_prompt, add_special_tokens)
1567
+ tokens_one = torch.cat([tokens_one, class_tokens_one], dim=0)
1568
+
1569
+ if args.train_text_encoder_ti and args.validation_prompt:
1570
+ # replace instances of --token_abstraction in validation prompt with the new tokens: "<si><si+1>" etc.
1571
+ for token_abs, token_replacement in train_dataset.token_abstraction_dict.items():
1572
+ args.validation_prompt = args.validation_prompt.replace(token_abs, "".join(token_replacement))
1573
+ print("validation prompt:", args.validation_prompt)
1574
+
1575
+ if args.cache_latents:
1576
+ latents_cache = []
1577
+ for batch in tqdm(train_dataloader, desc="Caching latents"):
1578
+ with torch.no_grad():
1579
+ batch["pixel_values"] = batch["pixel_values"].to(
1580
+ accelerator.device, non_blocking=True, dtype=torch.float32
1581
+ )
1582
+ latents_cache.append(vae.encode(batch["pixel_values"]).latent_dist)
1583
+
1584
+ if args.validation_prompt is None:
1585
+ del vae
1586
+ if torch.cuda.is_available():
1587
+ torch.cuda.empty_cache()
1588
+
1589
+ # Scheduler and math around the number of training steps.
1590
+ # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation.
1591
+ num_warmup_steps_for_scheduler = args.lr_warmup_steps * accelerator.num_processes
1592
+ if args.max_train_steps is None:
1593
+ len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes)
1594
+ num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps)
1595
+ num_training_steps_for_scheduler = (
1596
+ args.num_train_epochs * num_update_steps_per_epoch * accelerator.num_processes
1597
+ )
1598
+ else:
1599
+ num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes
1600
+
1601
+ lr_scheduler = get_scheduler(
1602
+ args.lr_scheduler,
1603
+ optimizer=optimizer,
1604
+ num_warmup_steps=num_warmup_steps_for_scheduler,
1605
+ num_training_steps=num_training_steps_for_scheduler,
1606
+ num_cycles=args.lr_num_cycles,
1607
+ power=args.lr_power,
1608
+ )
1609
+
1610
+ # Prepare everything with our `accelerator`.
1611
+ if not freeze_text_encoder:
1612
+ unet, text_encoder_one, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1613
+ unet, text_encoder_one, optimizer, train_dataloader, lr_scheduler
1614
+ )
1615
+ else:
1616
+ unet, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1617
+ unet, optimizer, train_dataloader, lr_scheduler
1618
+ )
1619
+
1620
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1621
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1622
+ if args.max_train_steps is None:
1623
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1624
+ if num_training_steps_for_scheduler != args.max_train_steps * accelerator.num_processes:
1625
+ logger.warning(
1626
+ f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match "
1627
+ f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. "
1628
+ f"This inconsistency may result in the learning rate scheduler not functioning properly."
1629
+ )
1630
+ # Afterwards we recalculate our number of training epochs
1631
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1632
+
1633
+ # We need to initialize the trackers we use, and also store our configuration.
1634
+ # The trackers initialize automatically on the main process.
1635
+ if accelerator.is_main_process:
1636
+ accelerator.init_trackers("dreambooth-lora-sd-15", config=vars(args))
1637
+
1638
+ # Train!
1639
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1640
+
1641
+ logger.info("***** Running training *****")
1642
+ logger.info(f" Num examples = {len(train_dataset)}")
1643
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1644
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
1645
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1646
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1647
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
1648
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1649
+ global_step = 0
1650
+ first_epoch = 0
1651
+
1652
+ # Potentially load in the weights and states from a previous save
1653
+ if args.resume_from_checkpoint:
1654
+ if args.resume_from_checkpoint != "latest":
1655
+ path = os.path.basename(args.resume_from_checkpoint)
1656
+ else:
1657
+ # Get the most recent checkpoint
1658
+ dirs = os.listdir(args.output_dir)
1659
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1660
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1661
+ path = dirs[-1] if len(dirs) > 0 else None
1662
+
1663
+ if path is None:
1664
+ accelerator.print(
1665
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1666
+ )
1667
+ args.resume_from_checkpoint = None
1668
+ initial_global_step = 0
1669
+ else:
1670
+ accelerator.print(f"Resuming from checkpoint {path}")
1671
+ accelerator.load_state(os.path.join(args.output_dir, path))
1672
+ global_step = int(path.split("-")[1])
1673
+
1674
+ initial_global_step = global_step
1675
+ first_epoch = global_step // num_update_steps_per_epoch
1676
+
1677
+ else:
1678
+ initial_global_step = 0
1679
+
1680
+ progress_bar = tqdm(
1681
+ range(0, args.max_train_steps),
1682
+ initial=initial_global_step,
1683
+ desc="Steps",
1684
+ # Only show the progress bar once on each machine.
1685
+ disable=not accelerator.is_local_main_process,
1686
+ )
1687
+
1688
+ if args.train_text_encoder:
1689
+ num_train_epochs_text_encoder = int(args.train_text_encoder_frac * args.num_train_epochs)
1690
+ elif args.train_text_encoder_ti: # args.train_text_encoder_ti
1691
+ num_train_epochs_text_encoder = int(args.train_text_encoder_ti_frac * args.num_train_epochs)
1692
+
1693
+ for epoch in range(first_epoch, args.num_train_epochs):
1694
+ # if performing any kind of optimization of text_encoder params
1695
+ if args.train_text_encoder or args.train_text_encoder_ti:
1696
+ if epoch == num_train_epochs_text_encoder:
1697
+ print("PIVOT HALFWAY", epoch)
1698
+ # stopping optimization of text_encoder params
1699
+ # resetting the optimizer to optimize only the unet params
1700
+ optimizer.param_groups[1]["lr"] = 0.0
1701
+
1702
+ else:
1703
+ # still optimizing the text encoder
1704
+ text_encoder_one.train()
1705
+ # set top parameter requires_grad = True so that gradient checkpointing works
1706
+ if args.train_text_encoder:
1707
+ text_encoder_one.text_model.embeddings.requires_grad_(True)
1708
+
1709
+ unet.train()
1710
+ for step, batch in enumerate(train_dataloader):
1711
+ with accelerator.accumulate(unet):
1712
+ prompts = batch["prompts"]
1713
+ # encode batch prompts when custom prompts are provided for each image -
1714
+ if train_dataset.custom_instance_prompts:
1715
+ if freeze_text_encoder:
1716
+ prompt_embeds = compute_text_embeddings(prompts, text_encoders, tokenizers)
1717
+
1718
+ else:
1719
+ tokens_one = tokenize_prompt(tokenizer_one, prompts, add_special_tokens)
1720
+
1721
+ if args.cache_latents:
1722
+ model_input = latents_cache[step].sample()
1723
+ else:
1724
+ pixel_values = batch["pixel_values"].to(dtype=vae.dtype)
1725
+ model_input = vae.encode(pixel_values).latent_dist.sample()
1726
+
1727
+ model_input = model_input * vae_scaling_factor
1728
+ if args.pretrained_vae_model_name_or_path is None:
1729
+ model_input = model_input.to(weight_dtype)
1730
+
1731
+ # Sample noise that we'll add to the latents
1732
+ noise = torch.randn_like(model_input)
1733
+ if args.noise_offset:
1734
+ # https://www.crosslabs.org//blog/diffusion-with-offset-noise
1735
+ noise += args.noise_offset * torch.randn(
1736
+ (model_input.shape[0], model_input.shape[1], 1, 1), device=model_input.device
1737
+ )
1738
+ bsz = model_input.shape[0]
1739
+ # Sample a random timestep for each image
1740
+ timesteps = torch.randint(
1741
+ 0, noise_scheduler.config.num_train_timesteps, (bsz,), device=model_input.device
1742
+ )
1743
+ timesteps = timesteps.long()
1744
+
1745
+ # Add noise to the model input according to the noise magnitude at each timestep
1746
+ # (this is the forward diffusion process)
1747
+ noisy_model_input = noise_scheduler.add_noise(model_input, noise, timesteps)
1748
+
1749
+ # Calculate the elements to repeat depending on the use of prior-preservation and custom captions.
1750
+ if not train_dataset.custom_instance_prompts:
1751
+ elems_to_repeat_text_embeds = bsz // 2 if args.with_prior_preservation else bsz
1752
+
1753
+ else:
1754
+ elems_to_repeat_text_embeds = 1
1755
+
1756
+ # Predict the noise residual
1757
+ if freeze_text_encoder:
1758
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
1759
+ model_pred = unet(noisy_model_input, timesteps, prompt_embeds_input).sample
1760
+ else:
1761
+ prompt_embeds = encode_prompt(
1762
+ text_encoders=[text_encoder_one],
1763
+ tokenizers=None,
1764
+ prompt=None,
1765
+ text_input_ids_list=[tokens_one],
1766
+ )
1767
+ prompt_embeds_input = prompt_embeds.repeat(elems_to_repeat_text_embeds, 1, 1)
1768
+ model_pred = unet(noisy_model_input, timesteps, prompt_embeds_input).sample
1769
+
1770
+ # Get the target for loss depending on the prediction type
1771
+ if noise_scheduler.config.prediction_type == "epsilon":
1772
+ target = noise
1773
+ elif noise_scheduler.config.prediction_type == "v_prediction":
1774
+ target = noise_scheduler.get_velocity(model_input, noise, timesteps)
1775
+ else:
1776
+ raise ValueError(f"Unknown prediction type {noise_scheduler.config.prediction_type}")
1777
+
1778
+ if args.with_prior_preservation:
1779
+ # Chunk the noise and model_pred into two parts and compute the loss on each part separately.
1780
+ model_pred, model_pred_prior = torch.chunk(model_pred, 2, dim=0)
1781
+ target, target_prior = torch.chunk(target, 2, dim=0)
1782
+
1783
+ # Compute prior loss
1784
+ prior_loss = F.mse_loss(model_pred_prior.float(), target_prior.float(), reduction="mean")
1785
+
1786
+ if args.snr_gamma is None:
1787
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="mean")
1788
+ else:
1789
+ # Compute loss-weights as per Section 3.4 of https://huggingface.co/papers/2303.09556.
1790
+ # Since we predict the noise instead of x_0, the original formulation is slightly changed.
1791
+ # This is discussed in Section 4.2 of the same paper.
1792
+
1793
+ if args.with_prior_preservation:
1794
+ # if we're using prior preservation, we calc snr for instance loss only -
1795
+ # and hence only need timesteps corresponding to instance images
1796
+ snr_timesteps, _ = torch.chunk(timesteps, 2, dim=0)
1797
+ else:
1798
+ snr_timesteps = timesteps
1799
+
1800
+ snr = compute_snr(noise_scheduler, snr_timesteps)
1801
+ base_weight = (
1802
+ torch.stack([snr, args.snr_gamma * torch.ones_like(snr_timesteps)], dim=1).min(dim=1)[0] / snr
1803
+ )
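+ # i.e. base_weight = min(SNR(t), snr_gamma) / SNR(t), the min-SNR weighting from the paper referenced above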
1804
+
1805
+ if noise_scheduler.config.prediction_type == "v_prediction":
1806
+ # Velocity objective needs to be floored to an SNR weight of one.
1807
+ mse_loss_weights = base_weight + 1
1808
+ else:
1809
+ # Epsilon and sample both use the same loss weights.
1810
+ mse_loss_weights = base_weight
1811
+
1812
+ loss = F.mse_loss(model_pred.float(), target.float(), reduction="none")
1813
+ loss = loss.mean(dim=list(range(1, len(loss.shape)))) * mse_loss_weights
1814
+ loss = loss.mean()
1815
+
1816
+ if args.with_prior_preservation:
1817
+ # Add the prior loss to the instance loss.
1818
+ loss = loss + args.prior_loss_weight * prior_loss
1819
+
1820
+ accelerator.backward(loss)
1821
+ if accelerator.sync_gradients:
1822
+ params_to_clip = (
1823
+ itertools.chain(unet_lora_parameters, text_lora_parameters_one)
1824
+ if (args.train_text_encoder or args.train_text_encoder_ti)
1825
+ else unet_lora_parameters
1826
+ )
1827
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1828
+ optimizer.step()
1829
+ lr_scheduler.step()
1830
+ optimizer.zero_grad()
1831
+
1832
+ # every step, we reset the embeddings to the original embeddings.
1833
+ if args.train_text_encoder_ti:
1834
+ for idx, text_encoder in enumerate(text_encoders):
1835
+ embedding_handler.retract_embeddings()
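+ # only the newly inserted tokens keep their updates; the pre-existing token embeddings are restored to their original values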
1836
+
1837
+ # Checks if the accelerator has performed an optimization step behind the scenes
1838
+ if accelerator.sync_gradients:
1839
+ progress_bar.update(1)
1840
+ global_step += 1
1841
+
1842
+ if accelerator.is_main_process:
1843
+ if global_step % args.checkpointing_steps == 0:
1844
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1845
+ if args.checkpoints_total_limit is not None:
1846
+ checkpoints = os.listdir(args.output_dir)
1847
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1848
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1849
+
1850
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1851
+ if len(checkpoints) >= args.checkpoints_total_limit:
1852
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1853
+ removing_checkpoints = checkpoints[0:num_to_remove]
1854
+
1855
+ logger.info(
1856
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1857
+ )
1858
+ logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1859
+
1860
+ for removing_checkpoint in removing_checkpoints:
1861
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1862
+ shutil.rmtree(removing_checkpoint)
1863
+
1864
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1865
+ accelerator.save_state(save_path)
1866
+ logger.info(f"Saved state to {save_path}")
1867
+
1868
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1869
+ progress_bar.set_postfix(**logs)
1870
+ accelerator.log(logs, step=global_step)
1871
+
1872
+ if global_step >= args.max_train_steps:
1873
+ break
1874
+
1875
+ if accelerator.is_main_process:
1876
+ if args.validation_prompt is not None and epoch % args.validation_epochs == 0:
1877
+ logger.info(
1878
+ f"Running validation... \n Generating {args.num_validation_images} images with prompt:"
1879
+ f" {args.validation_prompt}."
1880
+ )
1881
+ # create pipeline
1882
+ if freeze_text_encoder:
1883
+ text_encoder_one = text_encoder_cls_one.from_pretrained(
1884
+ args.pretrained_model_name_or_path,
1885
+ subfolder="text_encoder",
1886
+ revision=args.revision,
1887
+ variant=args.variant,
1888
+ )
1889
+ pipeline = StableDiffusionPipeline.from_pretrained(
1890
+ args.pretrained_model_name_or_path,
1891
+ vae=vae,
1892
+ tokenizer=tokenizer_one,
1893
+ text_encoder=accelerator.unwrap_model(text_encoder_one),
1894
+ unet=accelerator.unwrap_model(unet),
1895
+ revision=args.revision,
1896
+ variant=args.variant,
1897
+ torch_dtype=weight_dtype,
1898
+ )
1899
+
1900
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1901
+ scheduler_args = {}
1902
+
1903
+ if "variance_type" in pipeline.scheduler.config:
1904
+ variance_type = pipeline.scheduler.config.variance_type
1905
+
1906
+ if variance_type in ["learned", "learned_range"]:
1907
+ variance_type = "fixed_small"
1908
+
1909
+ scheduler_args["variance_type"] = variance_type
1910
+
1911
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(
1912
+ pipeline.scheduler.config, **scheduler_args
1913
+ )
1914
+
1915
+ pipeline = pipeline.to(accelerator.device)
1916
+ pipeline.set_progress_bar_config(disable=True)
1917
+
1918
+ # run inference
1919
+ generator = (
1920
+ torch.Generator(device=accelerator.device).manual_seed(args.seed)
1921
+ if args.seed is not None
1922
+ else None
1923
+ )
1924
+ pipeline_args = {"prompt": args.validation_prompt}
1925
+
1926
+ if torch.backends.mps.is_available():
1927
+ autocast_ctx = nullcontext()
1928
+ else:
1929
+ autocast_ctx = torch.autocast(accelerator.device.type)
1930
+
1931
+ with autocast_ctx:
1932
+ images = [
1933
+ pipeline(**pipeline_args, generator=generator).images[0]
1934
+ for _ in range(args.num_validation_images)
1935
+ ]
1936
+
1937
+ for tracker in accelerator.trackers:
1938
+ if tracker.name == "tensorboard":
1939
+ np_images = np.stack([np.asarray(img) for img in images])
1940
+ tracker.writer.add_images("validation", np_images, epoch, dataformats="NHWC")
1941
+ if tracker.name == "wandb":
1942
+ tracker.log(
1943
+ {
1944
+ "validation": [
1945
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
1946
+ for i, image in enumerate(images)
1947
+ ]
1948
+ }
1949
+ )
1950
+ del pipeline
1951
+ torch.cuda.empty_cache()
1952
+
1953
+ # Save the lora layers
1954
+ accelerator.wait_for_everyone()
1955
+ if accelerator.is_main_process:
1956
+ unet = accelerator.unwrap_model(unet)
1957
+ unet = unet.to(torch.float32)
1958
+ unet_lora_layers = convert_state_dict_to_diffusers(get_peft_model_state_dict(unet))
1959
+
1960
+ if args.train_text_encoder:
1961
+ text_encoder_one = accelerator.unwrap_model(text_encoder_one)
1962
+ text_encoder_lora_layers = convert_state_dict_to_diffusers(
1963
+ get_peft_model_state_dict(text_encoder_one.to(torch.float32))
1964
+ )
1965
+ else:
1966
+ text_encoder_lora_layers = None
1967
+
1968
+ StableDiffusionPipeline.save_lora_weights(
1969
+ save_directory=args.output_dir,
1970
+ unet_lora_layers=unet_lora_layers,
1971
+ text_encoder_lora_layers=text_encoder_lora_layers,
1972
+ )
1973
+
1974
+ if args.train_text_encoder_ti:
1975
+ embeddings_path = f"{args.output_dir}/{args.output_dir}_emb.safetensors"
1976
+ embedding_handler.save_embeddings(embeddings_path)
1977
+
1978
+ images = []
1979
+ if args.validation_prompt and args.num_validation_images > 0:
1980
+ # Final inference
1981
+ # Load previous pipeline
1982
+ vae = AutoencoderKL.from_pretrained(
1983
+ vae_path,
1984
+ subfolder="vae" if args.pretrained_vae_model_name_or_path is None else None,
1985
+ revision=args.revision,
1986
+ variant=args.variant,
1987
+ torch_dtype=weight_dtype,
1988
+ )
1989
+ pipeline = StableDiffusionPipeline.from_pretrained(
1990
+ args.pretrained_model_name_or_path,
1991
+ vae=vae,
1992
+ revision=args.revision,
1993
+ variant=args.variant,
1994
+ torch_dtype=weight_dtype,
1995
+ )
1996
+
1997
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
1998
+ scheduler_args = {}
1999
+
2000
+ if "variance_type" in pipeline.scheduler.config:
2001
+ variance_type = pipeline.scheduler.config.variance_type
2002
+
2003
+ if variance_type in ["learned", "learned_range"]:
2004
+ variance_type = "fixed_small"
2005
+
2006
+ scheduler_args["variance_type"] = variance_type
2007
+
2008
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config, **scheduler_args)
2009
+
2010
+ # load attention processors
2011
+ pipeline.load_lora_weights(args.output_dir)
2012
+
2013
+ # load new tokens
2014
+ if args.train_text_encoder_ti:
2015
+ state_dict = load_file(embeddings_path)
2016
+ all_new_tokens = []
2017
+ for key, value in token_abstraction_dict.items():
2018
+ all_new_tokens.extend(value)
2019
+ pipeline.load_textual_inversion(
2020
+ state_dict["clip_l"],
2021
+ token=all_new_tokens,
2022
+ text_encoder=pipeline.text_encoder,
2023
+ tokenizer=pipeline.tokenizer,
2024
+ )
2025
+ # run inference
2026
+ pipeline = pipeline.to(accelerator.device)
2027
+ generator = (
2028
+ torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None
2029
+ )
2030
+ images = [
2031
+ pipeline(args.validation_prompt, num_inference_steps=25, generator=generator).images[0]
2032
+ for _ in range(args.num_validation_images)
2033
+ ]
2034
+
2035
+ for tracker in accelerator.trackers:
2036
+ if tracker.name == "tensorboard":
2037
+ np_images = np.stack([np.asarray(img) for img in images])
2038
+ tracker.writer.add_images("test", np_images, epoch, dataformats="NHWC")
2039
+ if tracker.name == "wandb":
2040
+ tracker.log(
2041
+ {
2042
+ "test": [
2043
+ wandb.Image(image, caption=f"{i}: {args.validation_prompt}")
2044
+ for i, image in enumerate(images)
2045
+ ]
2046
+ }
2047
+ )
2048
+
2049
+ # Convert to WebUI format
2050
+ lora_state_dict = load_file(f"{args.output_dir}/pytorch_lora_weights.safetensors")
2051
+ peft_state_dict = convert_all_state_dict_to_peft(lora_state_dict)
2052
+ kohya_state_dict = convert_state_dict_to_kohya(peft_state_dict)
2053
+ save_file(kohya_state_dict, f"{args.output_dir}/{Path(args.output_dir).name}.safetensors")
2054
+
2055
+ save_model_card(
2056
+ model_id if not args.push_to_hub else repo_id,
2057
+ use_dora=args.use_dora,
2058
+ images=images,
2059
+ base_model=args.pretrained_model_name_or_path,
2060
+ train_text_encoder=args.train_text_encoder,
2061
+ train_text_encoder_ti=args.train_text_encoder_ti,
2062
+ token_abstraction_dict=train_dataset.token_abstraction_dict,
2063
+ instance_prompt=args.instance_prompt,
2064
+ validation_prompt=args.validation_prompt,
2065
+ repo_folder=args.output_dir,
2066
+ vae_path=args.pretrained_vae_model_name_or_path,
2067
+ )
2068
+ if args.push_to_hub:
2069
+ upload_folder(
2070
+ repo_id=repo_id,
2071
+ folder_path=args.output_dir,
2072
+ commit_message="End of training",
2073
+ ignore_patterns=["step_*", "epoch_*"],
2074
+ )
2075
+
2076
+ accelerator.end_training()
2077
+
2078
+
2079
+ if __name__ == "__main__":
2080
+ args = parse_args()
2081
+ main(args)
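+
+ # A minimal example invocation (placeholder values, adjust to your data and hardware):
+ # accelerate launch train_dreambooth_lora_sd15_advanced.py \
+ #     --pretrained_model_name_or_path=<base model id> \
+ #     --instance_data_dir=<path to instance images> \
+ #     --output_dir=<output path> \
+ #     --instance_prompt="a photo of TOK dog" \
+ #     --token_abstraction="TOK" \
+ #     --train_text_encoder_ti \
+ #     --resolution=512 --train_batch_size=1 --learning_rate=1e-4 \
+ #     --max_train_steps=1000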
exp_code/1_benchmark/diffusers-WanS2V/examples/advanced_diffusion_training/train_dreambooth_lora_sdxl_advanced.py ADDED
The diff for this file is too large to render. See raw diff
 
exp_code/1_benchmark/diffusers-WanS2V/examples/amused/README.md ADDED
@@ -0,0 +1,326 @@
1
+ ## Amused training
2
+
3
+ Amused can be finetuned on simple datasets relatively cheaply and quickly. Using 8bit optimizers, lora, and gradient accumulation, amused can be finetuned with as little as 5.5 GB. Here are a set of examples for finetuning amused on some relatively simple datasets. These training recipes are aggressively oriented towards minimal resources and fast verification -- i.e. the batch sizes are quite low and the learning rates are quite high. For optimal quality, you will probably want to increase the batch sizes and decrease learning rates.
4
+
5
+ All training examples use fp16 mixed precision and gradient checkpointing. We don't show 8 bit adam + lora as it's about the same memory use as just using lora (bitsandbytes uses full precision optimizer states for weights below a minimum size).
6
+
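+ As a rough illustration of the point about small weights (a minimal sketch, not part of the training scripts; it assumes the `min_8bit_size` argument exposed by bitsandbytes' 8-bit optimizers):
+
+ ```python
+ import torch
+ import bitsandbytes as bnb
+
+ # A LoRA-sized parameter with fewer elements than `min_8bit_size` (default 4096)
+ # keeps full-precision optimizer states, so 8-bit Adam adds little saving on top of lora.
+ small_param = torch.nn.Parameter(torch.randn(16, 64))
+ optimizer = bnb.optim.AdamW8bit([small_param], lr=1e-4, min_8bit_size=4096)
+ ```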
7
+ ### Finetuning the 256 checkpoint
8
+
9
+ These examples finetune on this [nouns](https://huggingface.co/datasets/m1guelpf/nouns) dataset.
10
+
11
+ Example results:
12
+
13
+ ![noun1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun1.png) ![noun2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun2.png) ![noun3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/noun3.png)
14
+
15
+
16
+ #### Full finetuning
17
+
18
+ Batch size: 8, Learning rate: 1e-4, Gives decent results in 750-1000 steps
19
+
20
+ | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
21
+ |------------|-----------------------------|------------------|-------------|
22
+ | 8 | 1 | 8 | 19.7 GB |
23
+ | 4 | 2 | 8 | 18.3 GB |
24
+ | 1 | 8 | 8 | 17.9 GB |
25
+
26
+ ```sh
27
+ accelerate launch train_amused.py \
28
+ --output_dir <output path> \
29
+ --train_batch_size <batch size> \
30
+ --gradient_accumulation_steps <gradient accumulation steps> \
31
+ --learning_rate 1e-4 \
32
+ --pretrained_model_name_or_path amused/amused-256 \
33
+ --instance_data_dataset 'm1guelpf/nouns' \
34
+ --image_key image \
35
+ --prompt_key text \
36
+ --resolution 256 \
37
+ --mixed_precision fp16 \
38
+ --lr_scheduler constant \
39
+ --validation_prompts \
40
+ 'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \
41
+ 'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \
42
+ 'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \
43
+ 'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \
44
+ 'a pixel art character with square red glasses' \
45
+ 'a pixel art character' \
46
+ 'square red glasses on a pixel art character' \
47
+ 'square red glasses on a pixel art character with a baseball-shaped head' \
48
+ --max_train_steps 10000 \
49
+ --checkpointing_steps 500 \
50
+ --validation_steps 250 \
51
+ --gradient_checkpointing
52
+ ```
53
+
54
+ #### Full finetuning + 8 bit adam
55
+
56
+ Note that this training config keeps the batch size low and the learning rate high to get results fast with low resources. However, due to 8 bit adam, it will diverge eventually. If you want to train for longer, you will have to up the batch size and lower the learning rate.
57
+
58
+ Batch size: 16, Learning rate: 2e-5, Gives decent results in ~750 steps
59
+
60
+ | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
61
+ |------------|-----------------------------|------------------|-------------|
62
+ | 16 | 1 | 16 | 20.1 GB |
63
+ | 8 | 2 | 16 | 15.6 GB |
64
+ | 1 | 16 | 16 | 10.7 GB |
65
+
66
+ ```sh
67
+ accelerate launch train_amused.py \
68
+ --output_dir <output path> \
69
+ --train_batch_size <batch size> \
70
+ --gradient_accumulation_steps <gradient accumulation steps> \
71
+ --learning_rate 2e-5 \
72
+ --use_8bit_adam \
73
+ --pretrained_model_name_or_path amused/amused-256 \
74
+ --instance_data_dataset 'm1guelpf/nouns' \
75
+ --image_key image \
76
+ --prompt_key text \
77
+ --resolution 256 \
78
+ --mixed_precision fp16 \
79
+ --lr_scheduler constant \
80
+ --validation_prompts \
81
+ 'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \
82
+ 'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \
83
+ 'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \
84
+ 'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \
85
+ 'a pixel art character with square red glasses' \
86
+ 'a pixel art character' \
87
+ 'square red glasses on a pixel art character' \
88
+ 'square red glasses on a pixel art character with a baseball-shaped head' \
89
+ --max_train_steps 10000 \
90
+ --checkpointing_steps 500 \
91
+ --validation_steps 250 \
92
+ --gradient_checkpointing
93
+ ```
94
+
95
+ #### Full finetuning + lora
96
+
97
+ Batch size: 16, Learning rate: 8e-4, Gives decent results in 1000-1250 steps
98
+
99
+ | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
100
+ |------------|-----------------------------|------------------|-------------|
101
+ | 16 | 1 | 16 | 14.1 GB |
102
+ | 8 | 2 | 16 | 10.1 GB |
103
+ | 1 | 16 | 16 | 6.5 GB |
104
+
105
+ ```sh
106
+ accelerate launch train_amused.py \
107
+ --output_dir <output path> \
108
+ --train_batch_size <batch size> \
109
+ --gradient_accumulation_steps <gradient accumulation steps> \
110
+ --learning_rate 8e-4 \
111
+ --use_lora \
112
+ --pretrained_model_name_or_path amused/amused-256 \
113
+ --instance_data_dataset 'm1guelpf/nouns' \
114
+ --image_key image \
115
+ --prompt_key text \
116
+ --resolution 256 \
117
+ --mixed_precision fp16 \
118
+ --lr_scheduler constant \
119
+ --validation_prompts \
120
+ 'a pixel art character with square red glasses, a baseball-shaped head and a orange-colored body on a dark background' \
121
+ 'a pixel art character with square orange glasses, a lips-shaped head and a red-colored body on a light background' \
122
+ 'a pixel art character with square blue glasses, a microwave-shaped head and a purple-colored body on a sunny background' \
123
+ 'a pixel art character with square red glasses, a baseball-shaped head and a blue-colored body on an orange background' \
124
+ 'a pixel art character with square red glasses' \
125
+ 'a pixel art character' \
126
+ 'square red glasses on a pixel art character' \
127
+ 'square red glasses on a pixel art character with a baseball-shaped head' \
128
+ --max_train_steps 10000 \
129
+ --checkpointing_steps 500 \
130
+ --validation_steps 250 \
131
+ --gradient_checkpointing
132
+ ```
133
+
134
+ ### Finetuning the 512 checkpoint
135
+
136
+ These examples finetune on this [minecraft](https://huggingface.co/monadical-labs/minecraft-preview) dataset.
137
+
138
+ Example results:
139
+
140
+ ![minecraft1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft1.png) ![minecraft2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft2.png) ![minecraft3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/minecraft3.png)
141
+
142
+ #### Full finetuning
143
+
144
+ Batch size: 8, Learning rate: 8e-5, Gives decent results in 500-1000 steps
145
+
146
+ | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
147
+ |------------|-----------------------------|------------------|-------------|
148
+ | 8 | 1 | 8 | 24.2 GB |
149
+ | 4 | 2 | 8 | 19.7 GB |
150
+ | 1 | 8 | 8 | 16.99 GB |
151
+
152
+ ```sh
153
+ accelerate launch train_amused.py \
154
+ --output_dir <output path> \
155
+ --train_batch_size <batch size> \
156
+ --gradient_accumulation_steps <gradient accumulation steps> \
157
+ --learning_rate 8e-5 \
158
+ --pretrained_model_name_or_path amused/amused-512 \
159
+ --instance_data_dataset 'monadical-labs/minecraft-preview' \
160
+ --prompt_prefix 'minecraft ' \
161
+ --image_key image \
162
+ --prompt_key text \
163
+ --resolution 512 \
164
+ --mixed_precision fp16 \
165
+ --lr_scheduler constant \
166
+ --validation_prompts \
167
+ 'minecraft Avatar' \
168
+ 'minecraft character' \
169
+ 'minecraft' \
170
+ 'minecraft president' \
171
+ 'minecraft pig' \
172
+ --max_train_steps 10000 \
173
+ --checkpointing_steps 500 \
174
+ --validation_steps 250 \
175
+ --gradient_checkpointing
176
+ ```
177
+
178
+ #### Full finetuning + 8 bit adam
179
+
180
+ Batch size: 8, Learning rate: 5e-6, Gives decent results in 500-1000 steps
181
+
182
+ | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
183
+ |------------|-----------------------------|------------------|-------------|
184
+ | 8 | 1 | 8 | 21.2 GB |
185
+ | 4 | 2 | 8 | 13.3 GB |
186
+ | 1 | 8 | 8 | 9.9 GB |
187
+
188
+ ```sh
189
+ accelerate launch train_amused.py \
190
+ --output_dir <output path> \
191
+ --train_batch_size <batch size> \
192
+ --gradient_accumulation_steps <gradient accumulation steps> \
193
+ --learning_rate 5e-6 \
194
+ --pretrained_model_name_or_path amused/amused-512 \
195
+ --instance_data_dataset 'monadical-labs/minecraft-preview' \
196
+ --prompt_prefix 'minecraft ' \
197
+ --image_key image \
198
+ --prompt_key text \
199
+ --resolution 512 \
200
+ --mixed_precision fp16 \
201
+ --lr_scheduler constant \
202
+ --validation_prompts \
203
+ 'minecraft Avatar' \
204
+ 'minecraft character' \
205
+ 'minecraft' \
206
+ 'minecraft president' \
207
+ 'minecraft pig' \
208
+ --max_train_steps 10000 \
209
+ --checkpointing_steps 500 \
210
+ --validation_steps 250 \
211
+ --gradient_checkpointing
212
+ ```
213
+
214
+ #### Full finetuning + lora
215
+
216
+ Batch size: 8, Learning rate: 1e-4, Gives decent results in 500-1000 steps
217
+
218
+ | Batch Size | Gradient Accumulation Steps | Effective Total Batch Size | Memory Used |
219
+ |------------|-----------------------------|------------------|-------------|
220
+ | 8 | 1 | 8 | 12.7 GB |
221
+ | 4 | 2 | 8 | 9.0 GB |
222
+ | 1 | 8 | 8 | 5.6 GB |
223
+
224
+ ```sh
225
+ accelerate launch train_amused.py \
226
+ --output_dir <output path> \
227
+ --train_batch_size <batch size> \
228
+ --gradient_accumulation_steps <gradient accumulation steps> \
229
+ --learning_rate 1e-4 \
230
+ --use_lora \
231
+ --pretrained_model_name_or_path amused/amused-512 \
232
+ --instance_data_dataset 'monadical-labs/minecraft-preview' \
233
+ --prompt_prefix 'minecraft ' \
234
+ --image_key image \
235
+ --prompt_key text \
236
+ --resolution 512 \
237
+ --mixed_precision fp16 \
238
+ --lr_scheduler constant \
239
+ --validation_prompts \
240
+ 'minecraft Avatar' \
241
+ 'minecraft character' \
242
+ 'minecraft' \
243
+ 'minecraft president' \
244
+ 'minecraft pig' \
245
+ --max_train_steps 10000 \
246
+ --checkpointing_steps 500 \
247
+ --validation_steps 250 \
248
+ --gradient_checkpointing
249
+ ```
250
+
251
+ ### Styledrop
252
+
253
+ [Styledrop](https://huggingface.co/papers/2306.00983) is an efficient finetuning method for learning a new style from just one or a few images. It has an optional first stage that generates additional training samples, which are then human-curated and used to augment the initial images. Our examples skip this optional sample-selection stage and simply finetune on a single image.
254
+
255
+ This is our example style image:
256
+ ![example](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/A%20mushroom%20in%20%5BV%5D%20style.png)
257
+
258
+ Download it to your local directory with
259
+ ```sh
260
+ wget https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/A%20mushroom%20in%20%5BV%5D%20style.png
261
+ ```
262
+
263
+ #### 256
264
+
265
+ Example results:
266
+
267
+ ![glowing_256_1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_1.png) ![glowing_256_2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_2.png) ![glowing_256_3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_256_3.png)
268
+
269
+ Learning rate: 4e-4, Gives decent results in 1500-2000 steps
270
+
271
+ Memory used: 6.5 GB
272
+
273
+ ```sh
274
+ accelerate launch train_amused.py \
275
+ --output_dir <output path> \
276
+ --mixed_precision fp16 \
277
+ --report_to wandb \
278
+ --use_lora \
279
+ --pretrained_model_name_or_path amused/amused-256 \
280
+ --train_batch_size 1 \
281
+ --lr_scheduler constant \
282
+ --learning_rate 4e-4 \
283
+ --validation_prompts \
284
+ 'A chihuahua walking on the street in [V] style' \
285
+ 'A banana on the table in [V] style' \
286
+ 'A church on the street in [V] style' \
287
+ 'A tabby cat walking in the forest in [V] style' \
288
+ --instance_data_image 'A mushroom in [V] style.png' \
289
+ --max_train_steps 10000 \
290
+ --checkpointing_steps 500 \
291
+ --validation_steps 100 \
292
+ --resolution 256
293
+ ```
294
+
295
+ #### 512
296
+
297
+ Example results:
298
+
299
+ ![glowing_512_1](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_1.png) ![glowing_512_2](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_2.png) ![glowing_512_3](https://huggingface.co/datasets/diffusers/docs-images/resolve/main/amused/glowing_512_3.png)
300
+
301
+ Learning rate: 1e-3, LoRA alpha: 1, Gives decent results in 1500-2000 steps
302
+
303
+ Memory used: 5.6 GB
304
+
305
+ ```sh
306
+ accelerate launch train_amused.py \
307
+ --output_dir <output path> \
308
+ --mixed_precision fp16 \
309
+ --report_to wandb \
310
+ --use_lora \
311
+ --pretrained_model_name_or_path amused/amused-512 \
312
+ --train_batch_size 1 \
313
+ --lr_scheduler constant \
314
+ --learning_rate 1e-3 \
315
+ --validation_prompts \
316
+ 'A chihuahua walking on the street in [V] style' \
317
+ 'A banana on the table in [V] style' \
318
+ 'A church on the street in [V] style' \
319
+ 'A tabby cat walking in the forest in [V] style' \
320
+ --instance_data_image 'A mushroom in [V] style.png' \
321
+ --max_train_steps 100000 \
322
+ --checkpointing_steps 500 \
323
+ --validation_steps 100 \
324
+ --resolution 512 \
325
+ --lora_alpha 1
326
+ ```
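+
+ After training, you can load the LoRA weights saved to `--output_dir` back into the pipeline for inference. The snippet below is a minimal sketch: it assumes the checkpoint directory contains LoRA weights written by `train_amused.py`, and that `load_lora_weights` is available on the pipeline via `AmusedLoraLoaderMixin`.
+
+ ```python
+ import torch
+ from diffusers import AmusedPipeline
+
+ pipe = AmusedPipeline.from_pretrained("amused/amused-512", torch_dtype=torch.float16)
+ # "<output path>" is the directory passed to --output_dir during training (assumed to hold the LoRA weights)
+ pipe.load_lora_weights("<output path>")
+ pipe = pipe.to("cuda")
+
+ image = pipe("A mushroom in [V] style", num_inference_steps=12).images[0]
+ image.save("styledrop_sample.png")
+ ```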
exp_code/1_benchmark/diffusers-WanS2V/examples/amused/train_amused.py ADDED
@@ -0,0 +1,975 @@
1
+ # coding=utf-8
2
+ # Copyright 2025 The HuggingFace Inc. team.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import copy
18
+ import logging
19
+ import math
20
+ import os
21
+ import shutil
22
+ from contextlib import nullcontext
23
+ from pathlib import Path
24
+
25
+ import torch
26
+ import torch.nn.functional as F
27
+ from accelerate import Accelerator
28
+ from accelerate.logging import get_logger
29
+ from accelerate.utils import ProjectConfiguration, set_seed
30
+ from datasets import load_dataset
31
+ from peft import LoraConfig
32
+ from peft.utils import get_peft_model_state_dict
33
+ from PIL import Image
34
+ from PIL.ImageOps import exif_transpose
35
+ from torch.utils.data import DataLoader, Dataset, default_collate
36
+ from torchvision import transforms
37
+ from transformers import (
38
+ CLIPTextModelWithProjection,
39
+ CLIPTokenizer,
40
+ )
41
+
42
+ import diffusers.optimization
43
+ from diffusers import AmusedPipeline, AmusedScheduler, EMAModel, UVit2DModel, VQModel
44
+ from diffusers.loaders import AmusedLoraLoaderMixin
45
+ from diffusers.utils import is_wandb_available
46
+
47
+
48
+ if is_wandb_available():
49
+ import wandb
50
+
51
+ logger = get_logger(__name__, log_level="INFO")
52
+
53
+
54
+ def parse_args():
55
+ parser = argparse.ArgumentParser()
56
+ parser.add_argument(
57
+ "--pretrained_model_name_or_path",
58
+ type=str,
59
+ default=None,
60
+ required=True,
61
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
62
+ )
63
+ parser.add_argument(
64
+ "--revision",
65
+ type=str,
66
+ default=None,
67
+ required=False,
68
+ help="Revision of pretrained model identifier from huggingface.co/models.",
69
+ )
70
+ parser.add_argument(
71
+ "--variant",
72
+ type=str,
73
+ default=None,
74
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
75
+ )
76
+ parser.add_argument(
77
+ "--instance_data_dataset",
78
+ type=str,
79
+ default=None,
80
+ required=False,
81
+ help="A Hugging Face dataset containing the training images",
82
+ )
83
+ parser.add_argument(
84
+ "--instance_data_dir",
85
+ type=str,
86
+ default=None,
87
+ required=False,
88
+ help="A folder containing the training data of instance images.",
89
+ )
90
+ parser.add_argument(
91
+ "--instance_data_image", type=str, default=None, required=False, help="A single training image"
92
+ )
93
+ parser.add_argument(
94
+ "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
95
+ )
96
+ parser.add_argument(
97
+ "--dataloader_num_workers",
98
+ type=int,
99
+ default=0,
100
+ help=(
101
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
102
+ ),
103
+ )
104
+ parser.add_argument(
105
+ "--allow_tf32",
106
+ action="store_true",
107
+ help=(
108
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
109
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
110
+ ),
111
+ )
112
+ parser.add_argument("--use_ema", action="store_true", help="Whether to use EMA model.")
113
+ parser.add_argument("--ema_decay", type=float, default=0.9999)
114
+ parser.add_argument("--ema_update_after_step", type=int, default=0)
115
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
116
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
117
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
118
+ parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
119
+ parser.add_argument(
120
+ "--output_dir",
121
+ type=str,
122
+ default="muse_training",
123
+ help="The output directory where the model predictions and checkpoints will be written.",
124
+ )
125
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
126
+ parser.add_argument(
127
+ "--logging_dir",
128
+ type=str,
129
+ default="logs",
130
+ help=(
131
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
132
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
133
+ ),
134
+ )
135
+ parser.add_argument(
136
+ "--max_train_steps",
137
+ type=int,
138
+ default=None,
139
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
140
+ )
141
+ parser.add_argument(
142
+ "--checkpointing_steps",
143
+ type=int,
144
+ default=500,
145
+ help=(
146
+ "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
147
+ "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
148
+ "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
149
+ "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
150
+ "instructions."
151
+ ),
152
+ )
153
+ parser.add_argument(
154
+ "--logging_steps",
155
+ type=int,
156
+ default=50,
157
+ )
158
+ parser.add_argument(
159
+ "--checkpoints_total_limit",
160
+ type=int,
161
+ default=None,
162
+ help=(
163
+ "Max number of checkpoints to store. Passed as `total_limit` to the `Accelerator` `ProjectConfiguration`."
164
+ " See Accelerator::save_state https://huggingface.co/docs/accelerate/package_reference/accelerator#accelerate.Accelerator.save_state"
165
+ " for more details"
166
+ ),
167
+ )
168
+ parser.add_argument(
169
+ "--resume_from_checkpoint",
170
+ type=str,
171
+ default=None,
172
+ help=(
173
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
174
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
175
+ ),
176
+ )
177
+ parser.add_argument(
178
+ "--train_batch_size", type=int, default=16, help="Batch size (per device) for the training dataloader."
179
+ )
180
+ parser.add_argument(
181
+ "--gradient_accumulation_steps",
182
+ type=int,
183
+ default=1,
184
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
185
+ )
186
+ parser.add_argument(
187
+ "--learning_rate",
188
+ type=float,
189
+ default=0.0003,
190
+ help="Initial learning rate (after the potential warmup period) to use.",
191
+ )
192
+ parser.add_argument(
193
+ "--scale_lr",
194
+ action="store_true",
195
+ default=False,
196
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
197
+ )
198
+ parser.add_argument(
199
+ "--lr_scheduler",
200
+ type=str,
201
+ default="constant",
202
+ help=(
203
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
204
+ ' "constant", "constant_with_warmup"]'
205
+ ),
206
+ )
207
+ parser.add_argument(
208
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
209
+ )
210
+ parser.add_argument(
211
+ "--validation_steps",
212
+ type=int,
213
+ default=100,
214
+ help=(
215
+ "Run validation every X steps. Validation consists of running the prompt"
216
+ " `args.validation_prompt` multiple times: `args.num_validation_images`"
217
+ " and logging the images."
218
+ ),
219
+ )
220
+ parser.add_argument(
221
+ "--mixed_precision",
222
+ type=str,
223
+ default=None,
224
+ choices=["no", "fp16", "bf16"],
225
+ help=(
226
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
227
+ " 1.10 and an Nvidia Ampere GPU. Defaults to the value of the accelerate config of the current system or the"
228
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
229
+ ),
230
+ )
231
+ parser.add_argument(
232
+ "--report_to",
233
+ type=str,
234
+ default="wandb",
235
+ help=(
236
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
237
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
238
+ ),
239
+ )
240
+ parser.add_argument("--validation_prompts", type=str, nargs="*")
241
+ parser.add_argument(
242
+ "--resolution",
243
+ type=int,
244
+ default=512,
245
+ help=(
246
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
247
+ " resolution"
248
+ ),
249
+ )
250
+ parser.add_argument("--split_vae_encode", type=int, required=False, default=None)
251
+ parser.add_argument("--min_masking_rate", type=float, default=0.0)
252
+ parser.add_argument("--cond_dropout_prob", type=float, default=0.0)
253
+ parser.add_argument("--max_grad_norm", default=None, type=float, help="Max gradient norm.", required=False)
254
+ parser.add_argument("--use_lora", action="store_true", help="Fine tune the model using LoRA")
255
+ parser.add_argument("--text_encoder_use_lora", action="store_true", help="Fine tune the text encoder using LoRA")
256
+ parser.add_argument("--lora_r", default=16, type=int)
257
+ parser.add_argument("--lora_alpha", default=32, type=int)
258
+ parser.add_argument("--lora_target_modules", default=["to_q", "to_k", "to_v"], type=str, nargs="+")
259
+ parser.add_argument("--text_encoder_lora_r", default=16, type=int)
260
+ parser.add_argument("--text_encoder_lora_alpha", default=32, type=int)
261
+ parser.add_argument("--text_encoder_lora_target_modules", default=["to_q", "to_k", "to_v"], type=str, nargs="+")
262
+ parser.add_argument("--train_text_encoder", action="store_true")
263
+ parser.add_argument("--image_key", type=str, required=False)
264
+ parser.add_argument("--prompt_key", type=str, required=False)
265
+ parser.add_argument(
266
+ "--gradient_checkpointing",
267
+ action="store_true",
268
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
269
+ )
270
+ parser.add_argument("--prompt_prefix", type=str, required=False, default=None)
271
+
272
+ args = parser.parse_args()
273
+
274
+ if args.report_to == "wandb":
275
+ if not is_wandb_available():
276
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
277
+
278
+ num_datasources = sum(
279
+ [x is not None for x in [args.instance_data_dir, args.instance_data_image, args.instance_data_dataset]]
280
+ )
281
+
282
+ if num_datasources != 1:
283
+ raise ValueError(
284
+ "provide one and only one of `--instance_data_dir`, `--instance_data_image`, or `--instance_data_dataset`"
285
+ )
286
+
287
+ if args.instance_data_dir is not None:
288
+ if not os.path.exists(args.instance_data_dir):
289
+ raise ValueError(f"Does not exist: `--args.instance_data_dir` {args.instance_data_dir}")
290
+
291
+ if args.instance_data_image is not None:
292
+ if not os.path.exists(args.instance_data_image):
293
+ raise ValueError(f"Does not exist: `--args.instance_data_image` {args.instance_data_image}")
294
+
295
+ if args.instance_data_dataset is not None and (args.image_key is None or args.prompt_key is None):
296
+ raise ValueError("`--instance_data_dataset` requires setting `--image_key` and `--prompt_key`")
297
+
298
+ return args
299
+
300
+
301
+ class InstanceDataRootDataset(Dataset):
302
+ def __init__(
303
+ self,
304
+ instance_data_root,
305
+ tokenizer,
306
+ size=512,
307
+ ):
308
+ self.size = size
309
+ self.tokenizer = tokenizer
310
+ self.instance_images_path = list(Path(instance_data_root).iterdir())
311
+
312
+ def __len__(self):
313
+ return len(self.instance_images_path)
314
+
315
+ def __getitem__(self, index):
316
+ image_path = self.instance_images_path[index % len(self.instance_images_path)]
317
+ instance_image = Image.open(image_path)
318
+ rv = process_image(instance_image, self.size)
319
+
320
+ prompt = os.path.splitext(os.path.basename(image_path))[0]
321
+ rv["prompt_input_ids"] = tokenize_prompt(self.tokenizer, prompt)[0]
322
+ return rv
323
+
324
+
325
+ class InstanceDataImageDataset(Dataset):
326
+ def __init__(
327
+ self,
328
+ instance_data_image,
329
+ train_batch_size,
330
+ size=512,
331
+ ):
332
+ self.value = process_image(Image.open(instance_data_image), size)
333
+ self.train_batch_size = train_batch_size
334
+
335
+ def __len__(self):
336
+ # Needed so a full batch of the data can be returned. Otherwise will return
337
+ # batches of size 1
338
+ return self.train_batch_size
339
+
340
+ def __getitem__(self, index):
341
+ return self.value
342
+
343
+
344
+ class HuggingFaceDataset(Dataset):
345
+ def __init__(
346
+ self,
347
+ hf_dataset,
348
+ tokenizer,
349
+ image_key,
350
+ prompt_key,
351
+ prompt_prefix=None,
352
+ size=512,
353
+ ):
354
+ self.size = size
355
+ self.image_key = image_key
356
+ self.prompt_key = prompt_key
357
+ self.tokenizer = tokenizer
358
+ self.hf_dataset = hf_dataset
359
+ self.prompt_prefix = prompt_prefix
360
+
361
+ def __len__(self):
362
+ return len(self.hf_dataset)
363
+
364
+ def __getitem__(self, index):
365
+ item = self.hf_dataset[index]
366
+
367
+ rv = process_image(item[self.image_key], self.size)
368
+
369
+ prompt = item[self.prompt_key]
370
+
371
+ if self.prompt_prefix is not None:
372
+ prompt = self.prompt_prefix + prompt
373
+
374
+ rv["prompt_input_ids"] = tokenize_prompt(self.tokenizer, prompt)[0]
375
+
376
+ return rv
377
+
378
+
379
+ def process_image(image, size):
380
+ image = exif_transpose(image)
381
+
382
+ if not image.mode == "RGB":
383
+ image = image.convert("RGB")
384
+
385
+ orig_height = image.height
386
+ orig_width = image.width
387
+
388
+ image = transforms.Resize(size, interpolation=transforms.InterpolationMode.BILINEAR)(image)
389
+
390
+ c_top, c_left, _, _ = transforms.RandomCrop.get_params(image, output_size=(size, size))
391
+ image = transforms.functional.crop(image, c_top, c_left, size, size)
392
+
393
+ image = transforms.ToTensor()(image)
394
+
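+ # Micro-conditioning for aMUSEd: original image size, crop coordinates, and a constant 6.0,
+ # assumed here to be the default aesthetic-score conditioning value.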
395
+ micro_conds = torch.tensor(
396
+ [orig_width, orig_height, c_top, c_left, 6.0],
397
+ )
398
+
399
+ return {"image": image, "micro_conds": micro_conds}
400
+
401
+
402
+ def tokenize_prompt(tokenizer, prompt):
403
+ return tokenizer(
404
+ prompt,
405
+ truncation=True,
406
+ padding="max_length",
407
+ max_length=77,
408
+ return_tensors="pt",
409
+ ).input_ids
410
+
411
+
412
+ def encode_prompt(text_encoder, input_ids):
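+ # Returns per-token conditioning from the penultimate hidden layer and the pooled/projected text embedding.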
413
+ outputs = text_encoder(input_ids, return_dict=True, output_hidden_states=True)
414
+ encoder_hidden_states = outputs.hidden_states[-2]
415
+ cond_embeds = outputs[0]
416
+ return encoder_hidden_states, cond_embeds
417
+
418
+
419
+ def main(args):
420
+ if args.allow_tf32:
421
+ torch.backends.cuda.matmul.allow_tf32 = True
422
+
423
+ logging_dir = Path(args.output_dir, args.logging_dir)
424
+
425
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
426
+
427
+ accelerator = Accelerator(
428
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
429
+ mixed_precision=args.mixed_precision,
430
+ log_with=args.report_to,
431
+ project_config=accelerator_project_config,
432
+ )
433
+ # Disable AMP for MPS.
434
+ if torch.backends.mps.is_available():
435
+ accelerator.native_amp = False
436
+
437
+ if accelerator.is_main_process:
438
+ os.makedirs(args.output_dir, exist_ok=True)
439
+
440
+ # Make one log on every process with the configuration for debugging.
441
+ logging.basicConfig(
442
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
443
+ datefmt="%m/%d/%Y %H:%M:%S",
444
+ level=logging.INFO,
445
+ )
446
+ logger.info(accelerator.state, main_process_only=False)
447
+
448
+ if accelerator.is_main_process:
449
+ accelerator.init_trackers("amused", config=vars(copy.deepcopy(args)))
450
+
451
+ if args.seed is not None:
452
+ set_seed(args.seed)
453
+
454
+ # TODO - will have to fix loading if training text encoder
455
+ text_encoder = CLIPTextModelWithProjection.from_pretrained(
456
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision, variant=args.variant
457
+ )
458
+ tokenizer = CLIPTokenizer.from_pretrained(
459
+ args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision, variant=args.variant
460
+ )
461
+ vq_model = VQModel.from_pretrained(
462
+ args.pretrained_model_name_or_path, subfolder="vqvae", revision=args.revision, variant=args.variant
463
+ )
464
+
465
+ if args.train_text_encoder:
466
+ if args.text_encoder_use_lora:
467
+ lora_config = LoraConfig(
468
+ r=args.text_encoder_lora_r,
469
+ lora_alpha=args.text_encoder_lora_alpha,
470
+ target_modules=args.text_encoder_lora_target_modules,
471
+ )
472
+ text_encoder.add_adapter(lora_config)
473
+ text_encoder.train()
474
+ text_encoder.requires_grad_(True)
475
+ else:
476
+ text_encoder.eval()
477
+ text_encoder.requires_grad_(False)
478
+
479
+ vq_model.requires_grad_(False)
480
+
481
+ model = UVit2DModel.from_pretrained(
482
+ args.pretrained_model_name_or_path,
483
+ subfolder="transformer",
484
+ revision=args.revision,
485
+ variant=args.variant,
486
+ )
487
+
488
+ if args.use_lora:
489
+ lora_config = LoraConfig(
490
+ r=args.lora_r,
491
+ lora_alpha=args.lora_alpha,
492
+ target_modules=args.lora_target_modules,
493
+ )
494
+ model.add_adapter(lora_config)
495
+
496
+ model.train()
497
+
498
+ if args.gradient_checkpointing:
499
+ model.enable_gradient_checkpointing()
500
+ if args.train_text_encoder:
501
+ text_encoder.gradient_checkpointing_enable()
502
+
503
+ if args.use_ema:
504
+ ema = EMAModel(
505
+ model.parameters(),
506
+ decay=args.ema_decay,
507
+ update_after_step=args.ema_update_after_step,
508
+ model_cls=UVit2DModel,
509
+ model_config=model.config,
510
+ )
511
+
512
+ def save_model_hook(models, weights, output_dir):
513
+ if accelerator.is_main_process:
514
+ transformer_lora_layers_to_save = None
515
+ text_encoder_lora_layers_to_save = None
516
+
517
+ for model_ in models:
518
+ if isinstance(model_, type(accelerator.unwrap_model(model))):
519
+ if args.use_lora:
520
+ transformer_lora_layers_to_save = get_peft_model_state_dict(model_)
521
+ else:
522
+ model_.save_pretrained(os.path.join(output_dir, "transformer"))
523
+ elif isinstance(model_, type(accelerator.unwrap_model(text_encoder))):
524
+ if args.text_encoder_use_lora:
525
+ text_encoder_lora_layers_to_save = get_peft_model_state_dict(model_)
526
+ else:
527
+ model_.save_pretrained(os.path.join(output_dir, "text_encoder"))
528
+ else:
529
+ raise ValueError(f"unexpected save model: {model_.__class__}")
530
+
531
+ # make sure to pop weight so that corresponding model is not saved again
532
+ weights.pop()
533
+
534
+ if transformer_lora_layers_to_save is not None or text_encoder_lora_layers_to_save is not None:
535
+ AmusedLoraLoaderMixin.save_lora_weights(
536
+ output_dir,
537
+ transformer_lora_layers=transformer_lora_layers_to_save,
538
+ text_encoder_lora_layers=text_encoder_lora_layers_to_save,
539
+ )
540
+
541
+ if args.use_ema:
542
+ ema.save_pretrained(os.path.join(output_dir, "ema_model"))
543
+
544
+ def load_model_hook(models, input_dir):
545
+ transformer = None
546
+ text_encoder_ = None
547
+
548
+ while len(models) > 0:
549
+ model_ = models.pop()
550
+
551
+ if isinstance(model_, type(accelerator.unwrap_model(model))):
552
+ if args.use_lora:
553
+ transformer = model_
554
+ else:
555
+ load_model = UVit2DModel.from_pretrained(os.path.join(input_dir, "transformer"))
556
+ model_.load_state_dict(load_model.state_dict())
557
+ del load_model
558
+ elif isinstance(model_, type(accelerator.unwrap_model(text_encoder))):
559
+ if args.text_encoder_use_lora:
560
+ text_encoder_ = model_
561
+ else:
562
+ load_model = CLIPTextModelWithProjection.from_pretrained(os.path.join(input_dir, "text_encoder"))
563
+ model_.load_state_dict(load_model.state_dict())
564
+ del load_model
565
+ else:
566
+ raise ValueError(f"unexpected save model: {model_.__class__}")
567
+
568
+ if transformer is not None or text_encoder_ is not None:
569
+ lora_state_dict, network_alphas = AmusedLoraLoaderMixin.lora_state_dict(input_dir)
570
+ AmusedLoraLoaderMixin.load_lora_into_text_encoder(
571
+ lora_state_dict, network_alphas=network_alphas, text_encoder=text_encoder_
572
+ )
573
+ AmusedLoraLoaderMixin.load_lora_into_transformer(
574
+ lora_state_dict, network_alphas=network_alphas, transformer=transformer
575
+ )
576
+
577
+ if args.use_ema:
578
+ load_from = EMAModel.from_pretrained(os.path.join(input_dir, "ema_model"), model_cls=UVit2DModel)
579
+ ema.load_state_dict(load_from.state_dict())
580
+ del load_from
581
+
582
+ accelerator.register_load_state_pre_hook(load_model_hook)
583
+ accelerator.register_save_state_pre_hook(save_model_hook)
584
+
585
+ if args.scale_lr:
586
+ args.learning_rate = (
587
+ args.learning_rate * args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
588
+ )
589
+
590
+ if args.use_8bit_adam:
591
+ try:
592
+ import bitsandbytes as bnb
593
+ except ImportError:
594
+ raise ImportError(
595
+ "Please install bitsandbytes to use 8-bit Adam. You can do so by running `pip install bitsandbytes`"
596
+ )
597
+
598
+ optimizer_cls = bnb.optim.AdamW8bit
599
+ else:
600
+ optimizer_cls = torch.optim.AdamW
601
+
602
+ # no decay on bias and layernorm and embedding
603
+ no_decay = ["bias", "layer_norm.weight", "mlm_ln.weight", "embeddings.weight"]
604
+ optimizer_grouped_parameters = [
605
+ {
606
+ "params": [p for n, p in model.named_parameters() if not any(nd in n for nd in no_decay)],
607
+ "weight_decay": args.adam_weight_decay,
608
+ },
609
+ {
610
+ "params": [p for n, p in model.named_parameters() if any(nd in n for nd in no_decay)],
611
+ "weight_decay": 0.0,
612
+ },
613
+ ]
614
+
615
+ if args.train_text_encoder:
616
+ optimizer_grouped_parameters.append(
617
+ {"params": text_encoder.parameters(), "weight_decay": args.adam_weight_decay}
618
+ )
619
+
620
+ optimizer = optimizer_cls(
621
+ optimizer_grouped_parameters,
622
+ lr=args.learning_rate,
623
+ betas=(args.adam_beta1, args.adam_beta2),
624
+ weight_decay=args.adam_weight_decay,
625
+ eps=args.adam_epsilon,
626
+ )
627
+
628
+ logger.info("Creating dataloaders and lr_scheduler")
629
+
630
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
631
+
632
+ if args.instance_data_dir is not None:
633
+ dataset = InstanceDataRootDataset(
634
+ instance_data_root=args.instance_data_dir,
635
+ tokenizer=tokenizer,
636
+ size=args.resolution,
637
+ )
638
+ elif args.instance_data_image is not None:
639
+ dataset = InstanceDataImageDataset(
640
+ instance_data_image=args.instance_data_image,
641
+ train_batch_size=args.train_batch_size,
642
+ size=args.resolution,
643
+ )
644
+ elif args.instance_data_dataset is not None:
645
+ dataset = HuggingFaceDataset(
646
+ hf_dataset=load_dataset(args.instance_data_dataset, split="train"),
647
+ tokenizer=tokenizer,
648
+ image_key=args.image_key,
649
+ prompt_key=args.prompt_key,
650
+ prompt_prefix=args.prompt_prefix,
651
+ size=args.resolution,
652
+ )
653
+ else:
654
+ assert False
655
+
656
+ train_dataloader = DataLoader(
657
+ dataset,
658
+ batch_size=args.train_batch_size,
659
+ shuffle=True,
660
+ num_workers=args.dataloader_num_workers,
661
+ collate_fn=default_collate,
662
+ )
663
+ train_dataloader.num_batches = len(train_dataloader)
664
+
665
+ lr_scheduler = diffusers.optimization.get_scheduler(
666
+ args.lr_scheduler,
667
+ optimizer=optimizer,
668
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
669
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
670
+ )
671
+
672
+ logger.info("Preparing model, optimizer and dataloaders")
673
+
674
+ if args.train_text_encoder:
675
+ model, optimizer, lr_scheduler, train_dataloader, text_encoder = accelerator.prepare(
676
+ model, optimizer, lr_scheduler, train_dataloader, text_encoder
677
+ )
678
+ else:
679
+ model, optimizer, lr_scheduler, train_dataloader = accelerator.prepare(
680
+ model, optimizer, lr_scheduler, train_dataloader
681
+ )
682
+
683
+ train_dataloader.num_batches = len(train_dataloader)
684
+
685
+ weight_dtype = torch.float32
686
+ if accelerator.mixed_precision == "fp16":
687
+ weight_dtype = torch.float16
688
+ elif accelerator.mixed_precision == "bf16":
689
+ weight_dtype = torch.bfloat16
690
+
691
+ if not args.train_text_encoder:
692
+ text_encoder.to(device=accelerator.device, dtype=weight_dtype)
693
+
694
+ vq_model.to(device=accelerator.device)
695
+
696
+ if args.use_ema:
697
+ ema.to(accelerator.device)
698
+
699
+ with nullcontext() if args.train_text_encoder else torch.no_grad():
700
+ empty_embeds, empty_clip_embeds = encode_prompt(
701
+ text_encoder, tokenize_prompt(tokenizer, "").to(text_encoder.device, non_blocking=True)
702
+ )
703
+
704
+ # There is a single image, we can just pre-encode the single prompt
705
+ if args.instance_data_image is not None:
706
+ prompt = os.path.splitext(os.path.basename(args.instance_data_image))[0]
707
+ encoder_hidden_states, cond_embeds = encode_prompt(
708
+ text_encoder, tokenize_prompt(tokenizer, prompt).to(text_encoder.device, non_blocking=True)
709
+ )
710
+ encoder_hidden_states = encoder_hidden_states.repeat(args.train_batch_size, 1, 1)
711
+ cond_embeds = cond_embeds.repeat(args.train_batch_size, 1)
712
+
713
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
714
+ num_update_steps_per_epoch = math.ceil(train_dataloader.num_batches / args.gradient_accumulation_steps)
715
+ # Afterwards we recalculate our number of training epochs.
716
+ # Note: We are not doing epoch based training here, but just using this for book keeping and being able to
717
+ # reuse the same training loop with other datasets/loaders.
718
+ num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
719
+
720
+ # Train!
721
+ logger.info("***** Running training *****")
722
+ logger.info(f" Num training steps = {args.max_train_steps}")
723
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
724
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
725
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
726
+
727
+ resume_from_checkpoint = args.resume_from_checkpoint
728
+ if resume_from_checkpoint:
729
+ if resume_from_checkpoint == "latest":
730
+ # Get the most recent checkpoint
731
+ dirs = os.listdir(args.output_dir)
732
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
733
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
734
+ if len(dirs) > 0:
735
+ resume_from_checkpoint = os.path.join(args.output_dir, dirs[-1])
736
+ else:
737
+ resume_from_checkpoint = None
738
+
739
+ if resume_from_checkpoint is None:
740
+ accelerator.print(
741
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
742
+ )
743
+ else:
744
+ accelerator.print(f"Resuming from checkpoint {resume_from_checkpoint}")
745
+
746
+ if resume_from_checkpoint is None:
747
+ global_step = 0
748
+ first_epoch = 0
749
+ else:
750
+ accelerator.load_state(resume_from_checkpoint)
751
+ global_step = int(os.path.basename(resume_from_checkpoint).split("-")[1])
752
+ first_epoch = global_step // num_update_steps_per_epoch
753
+
754
+ # As stated above, we are not doing epoch based training here, but just using this for book keeping and being able to
755
+ # reuse the same training loop with other datasets/loaders.
756
+ for epoch in range(first_epoch, num_train_epochs):
757
+ for batch in train_dataloader:
758
+ with torch.no_grad():
759
+ micro_conds = batch["micro_conds"].to(accelerator.device, non_blocking=True)
760
+ pixel_values = batch["image"].to(accelerator.device, non_blocking=True)
761
+
762
+ batch_size = pixel_values.shape[0]
763
+
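+ # Encode pixels into discrete VQ token ids, optionally in smaller chunks (--split_vae_encode) to reduce peak memory.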
764
+ split_batch_size = args.split_vae_encode if args.split_vae_encode is not None else batch_size
765
+ num_splits = math.ceil(batch_size / split_batch_size)
766
+ image_tokens = []
767
+ for i in range(num_splits):
768
+ start_idx = i * split_batch_size
769
+ end_idx = min((i + 1) * split_batch_size, batch_size)
770
+ bs = end_idx - start_idx  # number of samples in this encode chunk
771
+ image_tokens.append(
772
+ vq_model.quantize(vq_model.encode(pixel_values[start_idx:end_idx]).latents)[2][2].reshape(
773
+ bs, -1
774
+ )
775
+ )
776
+ image_tokens = torch.cat(image_tokens, dim=0)
777
+
778
+ batch_size, seq_len = image_tokens.shape
779
+
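+ # MUSE-style masking: sample a random timestep per sample and mask a cosine-scheduled fraction of the image tokens.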
780
+ timesteps = torch.rand(batch_size, device=image_tokens.device)
781
+ mask_prob = torch.cos(timesteps * math.pi * 0.5)
782
+ mask_prob = mask_prob.clip(args.min_masking_rate)
783
+
784
+ num_token_masked = (seq_len * mask_prob).round().clamp(min=1)
785
+ batch_randperm = torch.rand(batch_size, seq_len, device=image_tokens.device).argsort(dim=-1)
786
+ mask = batch_randperm < num_token_masked.unsqueeze(-1)
787
+
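+ # The mask token is the last id in the model's vocabulary; positions that are not masked get label -100 and are ignored by the loss.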
788
+ mask_id = accelerator.unwrap_model(model).config.vocab_size - 1
789
+ input_ids = torch.where(mask, mask_id, image_tokens)
790
+ labels = torch.where(mask, image_tokens, -100)
791
+
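+ # Classifier-free guidance dropout: randomly replace the text conditioning with the empty-prompt embeddings.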
792
+ if args.cond_dropout_prob > 0.0:
793
+ assert encoder_hidden_states is not None
794
+
795
+ batch_size = encoder_hidden_states.shape[0]
796
+
797
+ mask = (
798
+ torch.zeros((batch_size, 1, 1), device=encoder_hidden_states.device).float().uniform_(0, 1)
799
+ < args.cond_dropout_prob
800
+ )
801
+
802
+ empty_embeds_ = empty_embeds.expand(batch_size, -1, -1)
803
+ encoder_hidden_states = torch.where(
804
+ (encoder_hidden_states * mask).bool(), encoder_hidden_states, empty_embeds_
805
+ )
806
+
807
+ empty_clip_embeds_ = empty_clip_embeds.expand(batch_size, -1)
808
+ cond_embeds = torch.where((cond_embeds * mask.squeeze(-1)).bool(), cond_embeds, empty_clip_embeds_)
809
+
810
+ bs = input_ids.shape[0]
811
+ vae_scale_factor = 2 ** (len(vq_model.config.block_out_channels) - 1)
812
+ resolution = args.resolution // vae_scale_factor
813
+ input_ids = input_ids.reshape(bs, resolution, resolution)
814
+
815
+ if "prompt_input_ids" in batch:
816
+ with nullcontext() if args.train_text_encoder else torch.no_grad():
817
+ encoder_hidden_states, cond_embeds = encode_prompt(
818
+ text_encoder, batch["prompt_input_ids"].to(accelerator.device, non_blocking=True)
819
+ )
820
+
821
+ # Train Step
822
+ with accelerator.accumulate(model):
823
+ codebook_size = accelerator.unwrap_model(model).config.codebook_size
824
+
825
+ logits = (
826
+ model(
827
+ input_ids=input_ids,
828
+ encoder_hidden_states=encoder_hidden_states,
829
+ micro_conds=micro_conds,
830
+ pooled_text_emb=cond_embeds,
831
+ )
832
+ .reshape(bs, codebook_size, -1)
833
+ .permute(0, 2, 1)
834
+ .reshape(-1, codebook_size)
835
+ )
836
+
837
+ loss = F.cross_entropy(
838
+ logits,
839
+ labels.view(-1),
840
+ ignore_index=-100,
841
+ reduction="mean",
842
+ )
843
+
844
+ # Gather the losses across all processes for logging (if we use distributed training).
845
+ avg_loss = accelerator.gather(loss.repeat(args.train_batch_size)).mean()
846
+ avg_masking_rate = accelerator.gather(mask_prob.repeat(args.train_batch_size)).mean()
847
+
848
+ accelerator.backward(loss)
849
+
850
+ if args.max_grad_norm is not None and accelerator.sync_gradients:
851
+ accelerator.clip_grad_norm_(model.parameters(), args.max_grad_norm)
852
+
853
+ optimizer.step()
854
+ lr_scheduler.step()
855
+
856
+ optimizer.zero_grad(set_to_none=True)
857
+
858
+ # Checks if the accelerator has performed an optimization step behind the scenes
859
+ if accelerator.sync_gradients:
860
+ if args.use_ema:
861
+ ema.step(model.parameters())
862
+
863
+ if (global_step + 1) % args.logging_steps == 0:
864
+ logs = {
865
+ "step_loss": avg_loss.item(),
866
+ "lr": lr_scheduler.get_last_lr()[0],
867
+ "avg_masking_rate": avg_masking_rate.item(),
868
+ }
869
+ accelerator.log(logs, step=global_step + 1)
870
+
871
+ logger.info(
872
+ f"Step: {global_step + 1} "
873
+ f"Loss: {avg_loss.item():0.4f} "
874
+ f"LR: {lr_scheduler.get_last_lr()[0]:0.6f}"
875
+ )
876
+
877
+ if (global_step + 1) % args.checkpointing_steps == 0:
878
+ save_checkpoint(args, accelerator, global_step + 1)
879
+
880
+ if (global_step + 1) % args.validation_steps == 0 and accelerator.is_main_process:
881
+ if args.use_ema:
882
+ ema.store(model.parameters())
883
+ ema.copy_to(model.parameters())
884
+
885
+ with torch.no_grad():
886
+ logger.info("Generating images...")
887
+
888
+ model.eval()
889
+
890
+ if args.train_text_encoder:
891
+ text_encoder.eval()
892
+
893
+ scheduler = AmusedScheduler.from_pretrained(
894
+ args.pretrained_model_name_or_path,
895
+ subfolder="scheduler",
896
+ revision=args.revision,
897
+ variant=args.variant,
898
+ )
899
+
900
+ pipe = AmusedPipeline(
901
+ transformer=accelerator.unwrap_model(model),
902
+ tokenizer=tokenizer,
903
+ text_encoder=text_encoder,
904
+ vqvae=vq_model,
905
+ scheduler=scheduler,
906
+ )
907
+
908
+ pil_images = pipe(prompt=args.validation_prompts).images
909
+ wandb_images = [
910
+ wandb.Image(image, caption=args.validation_prompts[i])
911
+ for i, image in enumerate(pil_images)
912
+ ]
913
+
914
+ wandb.log({"generated_images": wandb_images}, step=global_step + 1)
915
+
916
+ model.train()
917
+
918
+ if args.train_text_encoder:
919
+ text_encoder.train()
920
+
921
+ if args.use_ema:
922
+ ema.restore(model.parameters())
923
+
924
+ global_step += 1
925
+
926
+ # Stop training if max steps is reached
927
+ if global_step >= args.max_train_steps:
928
+ break
929
+ # End for
930
+
931
+ accelerator.wait_for_everyone()
932
+
933
+ # Evaluate and save checkpoint at the end of training
934
+ save_checkpoint(args, accelerator, global_step)
935
+
936
+ # Save the final trained checkpoint
937
+ if accelerator.is_main_process:
938
+ model = accelerator.unwrap_model(model)
939
+ if args.use_ema:
940
+ ema.copy_to(model.parameters())
941
+ model.save_pretrained(args.output_dir)
942
+
943
+ accelerator.end_training()
944
+
945
+
946
+ def save_checkpoint(args, accelerator, global_step):
947
+ output_dir = args.output_dir
948
+
949
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
950
+ if accelerator.is_main_process and args.checkpoints_total_limit is not None:
951
+ checkpoints = os.listdir(output_dir)
952
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
953
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
954
+
955
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
956
+ if len(checkpoints) >= args.checkpoints_total_limit:
957
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
958
+ removing_checkpoints = checkpoints[0:num_to_remove]
959
+
960
+ logger.info(
961
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
962
+ )
963
+ logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
964
+
965
+ for removing_checkpoint in removing_checkpoints:
966
+ removing_checkpoint = os.path.join(output_dir, removing_checkpoint)
967
+ shutil.rmtree(removing_checkpoint)
968
+
969
+ save_path = Path(output_dir) / f"checkpoint-{global_step}"
970
+ accelerator.save_state(save_path)
971
+ logger.info(f"Saved state to {save_path}")
972
+
973
+
974
+ if __name__ == "__main__":
975
+ main(parse_args())
exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/README.md ADDED
@@ -0,0 +1,238 @@
1
+ # LoRA finetuning example for CogVideoX
2
+
3
+ Low-Rank Adaptation of Large Language Models was first introduced by Microsoft in [LoRA: Low-Rank Adaptation of Large Language Models](https://huggingface.co/papers/2106.09685) by *Edward J. Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, Weizhu Chen*.
4
+
5
+ In a nutshell, LoRA allows adapting pretrained models by adding pairs of rank-decomposition matrices to existing weights and **only** training those newly added weights. This has a couple of advantages:
6
+
7
+ - Previous pretrained weights are kept frozen so that the model is not prone to [catastrophic forgetting](https://www.pnas.org/doi/10.1073/pnas.1611835114).
8
+ - Rank-decomposition matrices have significantly fewer parameters than the original model, which means that trained LoRA weights are easily portable.
9
+ - LoRA attention layers allow controlling the extent to which the model is adapted toward new training images via a `scale` parameter.
10
+
11
+ At the moment, LoRA finetuning has only been tested for [CogVideoX-2b](https://huggingface.co/THUDM/CogVideoX-2b).
12
+
13
+ > [!NOTE]
14
+ > The scripts for CogVideoX come with limited support and may not be fully compatible with different training techniques. They are not feature-rich either and simply serve as minimal examples of finetuning to take inspiration from and improve.
15
+ >
16
+ > A repository containing memory-optimized finetuning scripts with support for multiple resolutions, dataset preparation, captioning, etc. is available [here](https://github.com/a-r-r-o-w/cogvideox-factory), which will be maintained jointly by the CogVideoX and Diffusers team.
17
+
18
+ ## Data Preparation
19
+
20
+ The training scripts accept data in two formats.
21
+
22
+ **First data format**
23
+
24
+ Two files where one file contains line-separated prompts and another file contains line-separated paths to video data (the path to video files must be relative to the path you pass when specifying `--instance_data_root`). Let's take a look at an example to understand this better!
25
+
26
+ Assume you've specified `--instance_data_root` as `/dataset`, and that this directory contains the files: `prompts.txt` and `videos.txt`.
27
+
28
+ The `prompts.txt` file should contain line-separated prompts:
29
+
30
+ ```
31
+ A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction.
32
+ A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures.
33
+ ...
34
+ ```
35
+
36
+ The `videos.txt` file should contain line-separated paths to video files. Note that the path should be _relative_ to the `--instance_data_root` directory.
37
+
38
+ ```
39
+ videos/00000.mp4
40
+ videos/00001.mp4
41
+ ...
42
+ ```
43
+
44
+ Overall, this is what your dataset would look like if you ran the `tree` command on the dataset root directory:
45
+
46
+ ```
47
+ /dataset
48
+ ├── prompts.txt
49
+ ├── videos.txt
50
+ ├── videos
51
+ ├── videos/00000.mp4
52
+ ├── videos/00001.mp4
53
+ ├── ...
54
+ ```
55
+
56
+ When using this format, the `--caption_column` must be `prompts.txt` and `--video_column` must be `videos.txt`.
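+
+ Before launching a run, it can help to sanity-check the two files. The snippet below is illustrative and assumes `/dataset` is the value passed to `--instance_data_root`:
+
+ ```python
+ from pathlib import Path
+
+ root = Path("/dataset")  # value passed to --instance_data_root
+ prompts = (root / "prompts.txt").read_text().splitlines()
+ videos = (root / "videos.txt").read_text().splitlines()
+
+ # prompts.txt and videos.txt are matched line by line, so they must have the same length
+ assert len(prompts) == len(videos), "prompts.txt and videos.txt must have the same number of lines"
+
+ # every entry in videos.txt is a path relative to --instance_data_root
+ missing = [v for v in videos if not (root / v).exists()]
+ assert not missing, f"missing video files: {missing[:5]}"
+ ```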
57
+
58
+ **Second data format**
59
+
60
+ You could use a single CSV file. For the sake of this example, assume you have a `metadata.csv` file. The expected format is:
61
+
62
+ ```
63
+ <CAPTION_COLUMN>,<PATH_TO_VIDEO_COLUMN>
64
+ """A black and white animated sequence featuring a rabbit, named Rabbity Ribfried, and an anthropomorphic goat in a musical, playful environment, showcasing their evolving interaction.""","""00000.mp4"""
65
+ """A black and white animated sequence on a ship's deck features a bulldog character, named Bully Bulldoger, showcasing exaggerated facial expressions and body language. The character progresses from confident to focused, then to strained and distressed, displaying a range of emotions as it navigates challenges. The ship's interior remains static in the background, with minimalistic details such as a bell and open door. The character's dynamic movements and changing expressions drive the narrative, with no camera movement to distract from its evolving reactions and physical gestures.""","""00001.mp4"""
66
+ ...
67
+ ```
68
+
69
+ In this case, the `--instance_data_root` should be the location where the videos are stored and `--dataset_name` should be either a path to local folder or `load_dataset` compatible hosted HF Dataset Repository or URL. Assuming you have videos of your Minecraft gameplay at `https://huggingface.co/datasets/my-awesome-username/minecraft-videos`, you would have to specify `my-awesome-username/minecraft-videos`.
70
+
71
+ When using this format, the `--caption_column` must be `<CAPTION_COLUMN>` and `--video_column` must be `<PATH_TO_VIDEO_COLUMN>`.
72
+
73
+ You are not strictly restricted to the CSV format. As long as the `load_dataset` method supports the file format for loading a basic `<PATH_TO_VIDEO_COLUMN>` and `<CAPTION_COLUMN>`, you should be good to go. The reason for going through these dataset-organization gymnastics is that we found `load_dataset` from the datasets library does not fully support all kinds of video formats. This will undoubtedly be improved in the future.
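+
+ For example, you can quickly confirm that `load_dataset` picks up the expected columns before pointing the training script at your data (illustrative, assuming a local `metadata.csv`):
+
+ ```python
+ from datasets import load_dataset
+
+ # Load the metadata file the same way the training script would
+ ds = load_dataset("csv", data_files="metadata.csv", split="train")
+ print(ds.column_names)  # should include your caption and video-path columns
+ print(ds[0])            # inspect one row to verify the values look right
+ ```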
74
+
75
+ > [!NOTE]
76
+ > CogVideoX works best with long and descriptive LLM-augmented prompts for video generation. We recommend pre-processing your videos by first generating a summary using a VLM and then augmenting the prompts with an LLM. To generate the above captions, we use [MiniCPM-V-26](https://huggingface.co/openbmb/MiniCPM-V-2_6) and [Llama-3.1-8B-Instruct](https://huggingface.co/meta-llama/Meta-Llama-3.1-8B-Instruct). A very barebones and no-frills example for this is available [here](https://gist.github.com/a-r-r-o-w/4dee20250e82f4e44690a02351324a4a). The official recommendation for augmenting prompts is [ChatGLM](https://huggingface.co/THUDM?search_models=chatglm) and a length of 50-100 words is considered good.
77
+
78
+ > [!NOTE]
79
+ > It is expected that your dataset is already pre-processed. If not, some basic pre-processing can be done by playing with the following parameters:
80
+ > `--height`, `--width`, `--fps`, `--max_num_frames`, `--skip_frames_start` and `--skip_frames_end`.
81
+ > Presently, all videos in your dataset should contain the same number of video frames when using a training batch size > 1.
82
+
83
+ <!-- TODO: Implement frame packing in future to address above issue. -->
84
+
85
+ ## Training
86
+
87
+ You need to setup your development environment by installing the necessary requirements. The following packages are required:
88
+ - Torch 2.0 or above based on the training features you are utilizing (might require latest or nightly versions for quantized/deepspeed training)
89
+ - `pip install diffusers transformers accelerate peft huggingface_hub` for all things modeling and training related
90
+ - `pip install datasets decord` for loading video training data
91
+ - `pip install bitsandbytes` for using 8-bit Adam or AdamW optimizers for memory-optimized training
92
+ - `pip install wandb` optionally for monitoring training logs
93
+ - `pip install deepspeed` optionally for [DeepSpeed](https://github.com/microsoft/DeepSpeed) training
94
+ - `pip install prodigyopt` optionally if you would like to use the Prodigy optimizer for training
95
+
96
+ To make sure you can successfully run the latest versions of the example scripts, we highly recommend **installing from source** and keeping the install up to date as we update the example scripts frequently and install some example-specific requirements. To do this, execute the following steps in a new virtual environment:
97
+
98
+ ```bash
99
+ git clone https://github.com/huggingface/diffusers
100
+ cd diffusers
101
+ pip install -e .
102
+ ```
103
+
104
+ And initialize an [🤗 Accelerate](https://github.com/huggingface/accelerate/) environment with:
105
+
106
+ ```bash
107
+ accelerate config
108
+ ```
109
+
110
+ Or for a default accelerate configuration without answering questions about your environment
111
+
112
+ ```bash
113
+ accelerate config default
114
+ ```
115
+
116
+ Or if your environment doesn't support an interactive shell (e.g., a notebook)
117
+
118
+ ```python
119
+ from accelerate.utils import write_basic_config
120
+ write_basic_config()
121
+ ```
122
+
123
+ When running `accelerate config`, setting the torch compile mode to True can yield dramatic speedups. Note also that we use the PEFT library as the backend for LoRA training, so make sure `peft>=0.6.0` is installed in your environment.
124
+
125
+ If you would like to push your model to the HF Hub after training is completed with a neat model card, make sure you're logged in:
126
+
127
+ ```
128
+ hf auth login
129
+
130
+ # Alternatively, you could upload your model manually using:
131
+ # hf upload my-cool-account-name/my-cool-lora-name /path/to/awesome/lora
132
+ ```
133
+
134
+ Make sure your data is prepared as described in [Data Preparation](#data-preparation). When ready, you can begin training!
135
+
136
+ Assuming you are training on 50 videos of a similar concept, we have found 1500-2000 steps to work well. The official recommendation, however, is 100 videos with a total of 4000 steps. Assuming you are training on a single GPU with a `--train_batch_size` of `1`:
137
+ - 1500 steps on 50 videos would correspond to `30` training epochs
138
+ - 4000 steps on 100 videos would correspond to `40` training epochs
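+
+ The arithmetic behind the two bullets above is simply total optimizer steps divided by steps per epoch. A quick check, assuming `--train_batch_size 1` and no gradient accumulation:
+
+ ```python
+ import math
+
+ def steps_to_epochs(max_train_steps, num_videos, train_batch_size=1, grad_accum=1):
+     steps_per_epoch = math.ceil(num_videos / (train_batch_size * grad_accum))
+     return max_train_steps / steps_per_epoch
+
+ print(steps_to_epochs(1500, 50))   # 30.0
+ print(steps_to_epochs(4000, 100))  # 40.0
+ ```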
139
+
140
+ The following bash script launches training for a text-to-video LoRA.
141
+
142
+ ```bash
143
+ #!/bin/bash
144
+
145
+ GPU_IDS="0"
146
+
147
+ accelerate launch --gpu_ids $GPU_IDS examples/cogvideo/train_cogvideox_lora.py \
148
+ --pretrained_model_name_or_path THUDM/CogVideoX-2b \
149
+ --cache_dir <CACHE_DIR> \
150
+ --instance_data_root <PATH_TO_WHERE_VIDEO_FILES_ARE_STORED> \
151
+ --dataset_name my-awesome-name/my-awesome-dataset \
152
+ --caption_column <CAPTION_COLUMN> \
153
+ --video_column <PATH_TO_VIDEO_COLUMN> \
154
+ --id_token <ID_TOKEN> \
155
+ --validation_prompt "<ID_TOKEN> Spiderman swinging over buildings:::A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical atmosphere of this unique musical performance" \
156
+ --validation_prompt_separator ::: \
157
+ --num_validation_videos 1 \
158
+ --validation_epochs 10 \
159
+ --seed 42 \
160
+ --rank 64 \
161
+ --lora_alpha 64 \
162
+ --mixed_precision fp16 \
163
+ --output_dir /raid/aryan/cogvideox-lora \
164
+ --height 480 --width 720 --fps 8 --max_num_frames 49 --skip_frames_start 0 --skip_frames_end 0 \
165
+ --train_batch_size 1 \
166
+ --num_train_epochs 30 \
167
+ --checkpointing_steps 1000 \
168
+ --gradient_accumulation_steps 1 \
169
+ --learning_rate 1e-3 \
170
+ --lr_scheduler cosine_with_restarts \
171
+ --lr_warmup_steps 200 \
172
+ --lr_num_cycles 1 \
173
+ --enable_slicing \
174
+ --enable_tiling \
175
+ --optimizer Adam \
176
+ --adam_beta1 0.9 \
177
+ --adam_beta2 0.95 \
178
+ --max_grad_norm 1.0 \
179
+ --report_to wandb
180
+ ```
181
+
182
+ To launch image-to-video finetuning instead, run the `train_cogvideox_image_to_video_lora.py` file. Additionally, you will have to pass `--validation_images` with paths to initial images corresponding to `--validation_prompts` for I2V validation to work.
183
+
184
+ To better track our training experiments, we're using the following flags in the command above:
185
+ * `--report_to wandb` will ensure the training runs are tracked on Weights and Biases. To use it, be sure to install `wandb` with `pip install wandb`.
186
+ * `validation_prompt` and `validation_epochs` to allow the script to do a few validation inference runs. This allows us to qualitatively check if the training is progressing as expected.
187
+
188
+ Note that setting the `<ID_TOKEN>` is not necessary. From some limited experimentation, we found it to work better (as it resembles [Dreambooth](https://huggingface.co/docs/diffusers/en/training/dreambooth) like training) than without. When provided, the ID_TOKEN is appended to the beginning of each prompt. So, if your ID_TOKEN was `"DISNEY"` and your prompt was `"Spiderman swinging over buildings"`, the effective prompt used in training would be `"DISNEY Spiderman swinging over buildings"`. When not provided, you would either be training without any such additional token or could augment your dataset to apply the token where you wish before starting the training.
189
+
190
+ > [!TIP]
191
+ > You can pass `--use_8bit_adam` to reduce the memory requirements of training.
192
+ > You can pass `--video_reshape_mode` to enable video cropping/reshaping, with supported options: `['center', 'random', 'none']`. See [this](https://gist.github.com/glide-the/7658dbfd5f555be0a1a687a4139dba40) notebook for examples.
193
+
194
+ > [!IMPORTANT]
195
+ > The following settings have been tested at the time of adding CogVideoX LoRA training support:
196
+ > - Our testing was primarily done on CogVideoX-2b. We will work on CogVideoX-5b and CogVideoX-5b-I2V soon
197
+ > - One dataset comprising 70 training videos at a resolution of `200 x 480 x 720` (F x H x W) was used. From this, by using frame skipping in data preprocessing, we created two smaller 49-frame and 16-frame datasets for faster experimentation, and because the maximum limit recommended by the CogVideoX team is 49 frames. Out of the 70 videos, we created three groups of 10, 25 and 50 videos. All videos were similar in the nature of the concept being trained.
198
+ > - 25+ videos worked best for training new concepts and styles.
199
+ > - We found that it is better to train with an identifier token that can be specified as `--id_token`. This is similar to Dreambooth-like training but normal finetuning without such a token works too.
200
+ > - The trained concept seemed to work decently well when combined with completely unrelated prompts. We expect even better results if CogVideoX-5B is finetuned.
201
+ > - The original repository uses a `lora_alpha` of `1`. We found this not suitable in many runs, possibly due to differences in modeling backends and training settings. Our recommendation is to set `lora_alpha` to either `rank` or `rank // 2`.
202
+ > - If you're training on data whose captions generate bad results with the original model, a `rank` of 64 and above is good and also the recommendation by the team behind CogVideoX. If the generations are already moderately good on your training captions, a `rank` of 16/32 should work. We found that setting the rank too low, say `4`, is not ideal and doesn't produce promising results.
203
+ > - The authors of CogVideoX recommend 4000 training steps and 100 training videos overall to achieve the best result. While that might yield the best results, we found from our limited experimentation that 2000 steps and 25 videos could also be sufficient.
204
+ > - When using the Prodigy optimizer for training, one can follow the recommendations from [this](https://huggingface.co/blog/sdxl_lora_advanced_script) blog. Prodigy tends to overfit quickly. From our very limited testing, we found a learning rate of `0.5` to be suitable in addition to `--prodigy_use_bias_correction`, `--prodigy_safeguard_warmup` and `--prodigy_decouple`.
205
+ > - The recommended learning rate by the CogVideoX authors and from our experimentation with Adam/AdamW is between `1e-3` and `1e-4` for a dataset of 25+ videos.
206
+ >
207
+ > Note that our testing is not exhaustive due to limited time for exploration. Our recommendation would be to play around with the different knobs and dials to find the best settings for your data.
208
+
209
+ ## Inference
210
+
211
+ Once you have trained a LoRA model, inference can be done by simply loading the LoRA weights into the `CogVideoXPipeline`.
212
+
213
+ ```python
214
+ import torch
215
+ from diffusers import CogVideoXPipeline
216
+ from diffusers.utils import export_to_video
217
+
218
+ pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-2b", torch_dtype=torch.float16)
219
+ # pipe.load_lora_weights("/path/to/lora/weights", adapter_name="cogvideox-lora") # Or,
220
+ pipe.load_lora_weights("my-awesome-hf-username/my-awesome-lora-name", adapter_name="cogvideox-lora") # If loading from the HF Hub
221
+ pipe.to("cuda")
222
+
223
+ # Assuming lora_alpha=32 and rank=64 for training. If different, set accordingly
224
+ pipe.set_adapters(["cogvideox-lora"], [32 / 64])
225
+
226
+ prompt = (
227
+ "A panda, dressed in a small, red jacket and a tiny hat, sits on a wooden stool in a serene bamboo forest. The "
228
+ "panda's fluffy paws strum a miniature acoustic guitar, producing soft, melodic tunes. Nearby, a few other "
229
+ "pandas gather, watching curiously and some clapping in rhythm. Sunlight filters through the tall bamboo, "
230
+ "casting a gentle glow on the scene. The panda's face is expressive, showing concentration and joy as it plays. "
231
+ "The background includes a small, flowing stream and vibrant green foliage, enhancing the peaceful and magical "
232
+ "atmosphere of this unique musical performance"
233
+ )
234
+ frames = pipe(prompt, guidance_scale=6, use_dynamic_cfg=True).frames[0]
235
+ export_to_video(frames, "output.mp4", fps=8)
236
+ ```
237
+
238
+ If you've trained a LoRA for `CogVideoXImageToVideoPipeline` instead, everything in the above example remains the same, except that you must also pass an image as the initial condition for generation, as shown in the sketch below.
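+
+ Below is a minimal sketch of such an image-to-video inference run. The base checkpoint, LoRA repository name, adapter name, image path and `lora_alpha`/`rank` values are assumptions and should be replaced with those matching your own training run.
+
+ ```python
+ import torch
+ from diffusers import CogVideoXImageToVideoPipeline
+ from diffusers.utils import export_to_video, load_image
+
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16)
+ pipe.load_lora_weights("my-awesome-hf-username/my-awesome-i2v-lora-name", adapter_name="cogvideox-i2v-lora")
+ pipe.to("cuda")
+
+ # Assuming lora_alpha=32 and rank=64 were used for training. If different, set accordingly
+ pipe.set_adapters(["cogvideox-i2v-lora"], [32 / 64])
+
+ # Hypothetical first-frame image; replace with your own conditioning image
+ image = load_image("/path/to/initial_frame.png")
+ prompt = "A panda, dressed in a small, red jacket and a tiny hat, plays a miniature acoustic guitar in a serene bamboo forest"
+ frames = pipe(image=image, prompt=prompt, guidance_scale=6, use_dynamic_cfg=True).frames[0]
+ export_to_video(frames, "output.mp4", fps=8)
+ ```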
exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/requirements.txt ADDED
@@ -0,0 +1,10 @@
1
+ accelerate>=0.31.0
2
+ torchvision
3
+ transformers>=4.41.2
4
+ ftfy
5
+ tensorboard
6
+ Jinja2
7
+ peft>=0.11.1
8
+ sentencepiece
9
+ decord>=0.6.0
10
+ imageio-ffmpeg
exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/train_cogvideox_image_to_video_lora.py ADDED
@@ -0,0 +1,1619 @@
1
+ # Copyright 2025 The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import logging
18
+ import math
19
+ import os
20
+ import random
21
+ import shutil
22
+ from datetime import timedelta
23
+ from pathlib import Path
24
+ from typing import List, Optional, Tuple, Union
25
+
26
+ import torch
27
+ import transformers
28
+ from accelerate import Accelerator, DistributedType
29
+ from accelerate.logging import get_logger
30
+ from accelerate.utils import DistributedDataParallelKwargs, InitProcessGroupKwargs, ProjectConfiguration, set_seed
31
+ from huggingface_hub import create_repo, upload_folder
32
+ from peft import LoraConfig, get_peft_model_state_dict, set_peft_model_state_dict
33
+ from torch.utils.data import DataLoader, Dataset
34
+ from torchvision import transforms
35
+ from tqdm.auto import tqdm
36
+ from transformers import AutoTokenizer, T5EncoderModel, T5Tokenizer
37
+
38
+ import diffusers
39
+ from diffusers import (
40
+ AutoencoderKLCogVideoX,
41
+ CogVideoXDPMScheduler,
42
+ CogVideoXImageToVideoPipeline,
43
+ CogVideoXTransformer3DModel,
44
+ )
45
+ from diffusers.models.embeddings import get_3d_rotary_pos_embed
46
+ from diffusers.optimization import get_scheduler
47
+ from diffusers.pipelines.cogvideo.pipeline_cogvideox import get_resize_crop_region_for_grid
48
+ from diffusers.training_utils import cast_training_params, free_memory
49
+ from diffusers.utils import (
50
+ check_min_version,
51
+ convert_unet_state_dict_to_peft,
52
+ export_to_video,
53
+ is_wandb_available,
54
+ load_image,
55
+ )
56
+ from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
57
+ from diffusers.utils.torch_utils import is_compiled_module
58
+
59
+
60
+ if is_wandb_available():
61
+ import wandb
62
+
63
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
64
+ check_min_version("0.36.0.dev0")
65
+
66
+ logger = get_logger(__name__)
67
+
68
+
69
+ def get_args():
70
+ parser = argparse.ArgumentParser(description="Simple example of a training script for CogVideoX.")
71
+
72
+ # Model information
73
+ parser.add_argument(
74
+ "--pretrained_model_name_or_path",
75
+ type=str,
76
+ default=None,
77
+ required=True,
78
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
79
+ )
80
+ parser.add_argument(
81
+ "--revision",
82
+ type=str,
83
+ default=None,
84
+ required=False,
85
+ help="Revision of pretrained model identifier from huggingface.co/models.",
86
+ )
87
+ parser.add_argument(
88
+ "--variant",
89
+ type=str,
90
+ default=None,
91
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
92
+ )
93
+ parser.add_argument(
94
+ "--cache_dir",
95
+ type=str,
96
+ default=None,
97
+ help="The directory where the downloaded models and datasets will be stored.",
98
+ )
99
+
100
+ # Dataset information
101
+ parser.add_argument(
102
+ "--dataset_name",
103
+ type=str,
104
+ default=None,
105
+ help=(
106
+ "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
107
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
108
+ " or to a folder containing files that 🤗 Datasets can understand."
109
+ ),
110
+ )
111
+ parser.add_argument(
112
+ "--dataset_config_name",
113
+ type=str,
114
+ default=None,
115
+ help="The config of the Dataset, leave as None if there's only one config.",
116
+ )
117
+ parser.add_argument(
118
+ "--instance_data_root",
119
+ type=str,
120
+ default=None,
121
+ help=("A folder containing the training data."),
122
+ )
123
+ parser.add_argument(
124
+ "--video_column",
125
+ type=str,
126
+ default="video",
127
+ help="The column of the dataset containing videos. Or, the name of the file in `--instance_data_root` folder containing the line-separated path to video data.",
128
+ )
129
+ parser.add_argument(
130
+ "--caption_column",
131
+ type=str,
132
+ default="text",
133
+ help="The column of the dataset containing the instance prompt for each video. Or, the name of the file in `--instance_data_root` folder containing the line-separated instance prompts.",
134
+ )
135
+ parser.add_argument(
136
+ "--id_token", type=str, default=None, help="Identifier token appended to the start of each prompt if provided."
137
+ )
138
+ parser.add_argument(
139
+ "--dataloader_num_workers",
140
+ type=int,
141
+ default=0,
142
+ help=(
143
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
144
+ ),
145
+ )
146
+
147
+ # Validation
148
+ parser.add_argument(
149
+ "--validation_prompt",
150
+ type=str,
151
+ default=None,
152
+ help="One or more prompt(s) that are used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_separator' string.",
153
+ )
154
+ parser.add_argument(
155
+ "--validation_images",
156
+ type=str,
157
+ default=None,
158
+ help="One or more image path(s) that are used during validation to verify that the model is learning. Multiple validation paths should be separated by the '--validation_prompt_separator' string. These should correspond to the order of the validation prompts.",
159
+ )
160
+ parser.add_argument(
161
+ "--validation_prompt_separator",
162
+ type=str,
163
+ default=":::",
164
+ help="String that separates multiple validation prompts",
165
+ )
166
+ parser.add_argument(
167
+ "--num_validation_videos",
168
+ type=int,
169
+ default=1,
170
+ help="Number of videos that should be generated during validation per `validation_prompt`.",
171
+ )
172
+ parser.add_argument(
173
+ "--validation_epochs",
174
+ type=int,
175
+ default=50,
176
+ help=(
177
+ "Run validation every X epochs. Validation consists of running the prompt `args.validation_prompt` multiple times: `args.num_validation_videos`."
178
+ ),
179
+ )
180
+ parser.add_argument(
181
+ "--guidance_scale",
182
+ type=float,
183
+ default=6,
184
+ help="The guidance scale to use while sampling validation videos.",
185
+ )
186
+ parser.add_argument(
187
+ "--use_dynamic_cfg",
188
+ action="store_true",
189
+ default=False,
190
+ help="Whether or not to use the default cosine dynamic guidance schedule when sampling validation videos.",
191
+ )
192
+
193
+ # Training information
194
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
195
+ parser.add_argument(
196
+ "--rank",
197
+ type=int,
198
+ default=128,
199
+ help=("The dimension of the LoRA update matrices."),
200
+ )
201
+ parser.add_argument(
202
+ "--lora_alpha",
203
+ type=float,
204
+ default=128,
205
+ help=("The scaling factor to scale LoRA weight update. The actual scaling factor is `lora_alpha / rank`"),
206
+ )
207
+ parser.add_argument(
208
+ "--mixed_precision",
209
+ type=str,
210
+ default=None,
211
+ choices=["no", "fp16", "bf16"],
212
+ help=(
213
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
214
+ " 1.10 and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
215
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
216
+ ),
217
+ )
218
+ parser.add_argument(
219
+ "--output_dir",
220
+ type=str,
221
+ default="cogvideox-i2v-lora",
222
+ help="The output directory where the model predictions and checkpoints will be written.",
223
+ )
224
+ parser.add_argument(
225
+ "--height",
226
+ type=int,
227
+ default=480,
228
+ help="All input videos are resized to this height.",
229
+ )
230
+ parser.add_argument(
231
+ "--width",
232
+ type=int,
233
+ default=720,
234
+ help="All input videos are resized to this width.",
235
+ )
236
+ parser.add_argument("--fps", type=int, default=8, help="All input videos will be used at this FPS.")
237
+ parser.add_argument(
238
+ "--max_num_frames", type=int, default=49, help="All input videos will be truncated to this many frames."
239
+ )
240
+ parser.add_argument(
241
+ "--skip_frames_start",
242
+ type=int,
243
+ default=0,
244
+ help="Number of frames to skip from the beginning of each input video. Useful if training data contains intro sequences.",
245
+ )
246
+ parser.add_argument(
247
+ "--skip_frames_end",
248
+ type=int,
249
+ default=0,
250
+ help="Number of frames to skip from the end of each input video. Useful if training data contains outro sequences.",
251
+ )
252
+ parser.add_argument(
253
+ "--random_flip",
254
+ action="store_true",
255
+ help="whether to randomly flip videos horizontally",
256
+ )
257
+ parser.add_argument(
258
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
259
+ )
260
+ parser.add_argument("--num_train_epochs", type=int, default=1)
261
+ parser.add_argument(
262
+ "--max_train_steps",
263
+ type=int,
264
+ default=None,
265
+ help="Total number of training steps to perform. If provided, overrides `--num_train_epochs`.",
266
+ )
267
+ parser.add_argument(
268
+ "--checkpointing_steps",
269
+ type=int,
270
+ default=500,
271
+ help=(
272
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
273
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
274
+ " training using `--resume_from_checkpoint`."
275
+ ),
276
+ )
277
+ parser.add_argument(
278
+ "--checkpoints_total_limit",
279
+ type=int,
280
+ default=None,
281
+ help=("Max number of checkpoints to store."),
282
+ )
283
+ parser.add_argument(
284
+ "--resume_from_checkpoint",
285
+ type=str,
286
+ default=None,
287
+ help=(
288
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
289
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
290
+ ),
291
+ )
292
+ parser.add_argument(
293
+ "--gradient_accumulation_steps",
294
+ type=int,
295
+ default=1,
296
+ help="Number of update steps to accumulate before performing a backward/update pass.",
297
+ )
298
+ parser.add_argument(
299
+ "--gradient_checkpointing",
300
+ action="store_true",
301
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
302
+ )
303
+ parser.add_argument(
304
+ "--learning_rate",
305
+ type=float,
306
+ default=1e-4,
307
+ help="Initial learning rate (after the potential warmup period) to use.",
308
+ )
309
+ parser.add_argument(
310
+ "--scale_lr",
311
+ action="store_true",
312
+ default=False,
313
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
314
+ )
315
+ parser.add_argument(
316
+ "--lr_scheduler",
317
+ type=str,
318
+ default="constant",
319
+ help=(
320
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
321
+ ' "constant", "constant_with_warmup"]'
322
+ ),
323
+ )
324
+ parser.add_argument(
325
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
326
+ )
327
+ parser.add_argument(
328
+ "--lr_num_cycles",
329
+ type=int,
330
+ default=1,
331
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
332
+ )
333
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
334
+ parser.add_argument(
335
+ "--enable_slicing",
336
+ action="store_true",
337
+ default=False,
338
+ help="Whether or not to use VAE slicing for saving memory.",
339
+ )
340
+ parser.add_argument(
341
+ "--enable_tiling",
342
+ action="store_true",
343
+ default=False,
344
+ help="Whether or not to use VAE tiling for saving memory.",
345
+ )
346
+ parser.add_argument(
347
+ "--noised_image_dropout",
348
+ type=float,
349
+ default=0.05,
350
+ help="Image condition dropout probability.",
351
+ )
352
+
353
+ # Optimizer
354
+ parser.add_argument(
355
+ "--optimizer",
356
+ type=lambda s: s.lower(),
357
+ default="adam",
358
+ choices=["adam", "adamw", "prodigy"],
359
+ help=("The optimizer type to use."),
360
+ )
361
+ parser.add_argument(
362
+ "--use_8bit_adam",
363
+ action="store_true",
364
+ help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
365
+ )
366
+ parser.add_argument(
367
+ "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
368
+ )
369
+ parser.add_argument(
370
+ "--adam_beta2", type=float, default=0.95, help="The beta2 parameter for the Adam and Prodigy optimizers."
371
+ )
372
+ parser.add_argument(
373
+ "--prodigy_beta3",
374
+ type=float,
375
+ default=None,
376
+ help="Coefficients for computing the Prodigy optimizer's stepsize using running averages. If set to None, uses the value of square root of beta2.",
377
+ )
378
+ parser.add_argument("--prodigy_decouple", action="store_true", help="Use AdamW style decoupled weight decay")
379
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
380
+ parser.add_argument(
381
+ "--adam_epsilon",
382
+ type=float,
383
+ default=1e-08,
384
+ help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
385
+ )
386
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
387
+ parser.add_argument("--prodigy_use_bias_correction", action="store_true", help="Turn on Adam's bias correction.")
388
+ parser.add_argument(
389
+ "--prodigy_safeguard_warmup",
390
+ action="store_true",
391
+ help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage.",
392
+ )
393
+
394
+ # Other information
395
+ parser.add_argument("--tracker_name", type=str, default=None, help="Project tracker name")
396
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
397
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
398
+ parser.add_argument(
399
+ "--hub_model_id",
400
+ type=str,
401
+ default=None,
402
+ help="The name of the repository to keep in sync with the local `output_dir`.",
403
+ )
404
+ parser.add_argument(
405
+ "--logging_dir",
406
+ type=str,
407
+ default="logs",
408
+ help="Directory where logs are stored.",
409
+ )
410
+ parser.add_argument(
411
+ "--allow_tf32",
412
+ action="store_true",
413
+ help=(
414
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
415
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
416
+ ),
417
+ )
418
+ parser.add_argument(
419
+ "--report_to",
420
+ type=str,
421
+ default=None,
422
+ help=(
423
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
424
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
425
+ ),
426
+ )
427
+ parser.add_argument("--nccl_timeout", type=int, default=600, help="NCCL backend timeout in seconds.")
428
+
429
+ return parser.parse_args()
430
+
431
+
432
+ class VideoDataset(Dataset):
433
+ def __init__(
434
+ self,
435
+ instance_data_root: Optional[str] = None,
436
+ dataset_name: Optional[str] = None,
437
+ dataset_config_name: Optional[str] = None,
438
+ caption_column: str = "text",
439
+ video_column: str = "video",
440
+ height: int = 480,
441
+ width: int = 720,
442
+ fps: int = 8,
443
+ max_num_frames: int = 49,
444
+ skip_frames_start: int = 0,
445
+ skip_frames_end: int = 0,
446
+ cache_dir: Optional[str] = None,
447
+ id_token: Optional[str] = None,
448
+ ) -> None:
449
+ super().__init__()
450
+
451
+ self.instance_data_root = Path(instance_data_root) if instance_data_root is not None else None
452
+ self.dataset_name = dataset_name
453
+ self.dataset_config_name = dataset_config_name
454
+ self.caption_column = caption_column
455
+ self.video_column = video_column
456
+ self.height = height
457
+ self.width = width
458
+ self.fps = fps
459
+ self.max_num_frames = max_num_frames
460
+ self.skip_frames_start = skip_frames_start
461
+ self.skip_frames_end = skip_frames_end
462
+ self.cache_dir = cache_dir
463
+ self.id_token = id_token or ""
464
+
465
+ if dataset_name is not None:
466
+ self.instance_prompts, self.instance_video_paths = self._load_dataset_from_hub()
467
+ else:
468
+ self.instance_prompts, self.instance_video_paths = self._load_dataset_from_local_path()
469
+
470
+ self.instance_prompts = [self.id_token + prompt for prompt in self.instance_prompts]
471
+
472
+ self.num_instance_videos = len(self.instance_video_paths)
473
+ if self.num_instance_videos != len(self.instance_prompts):
474
+ raise ValueError(
475
+ f"Expected length of instance prompts and videos to be the same but found {len(self.instance_prompts)=} and {len(self.instance_video_paths)=}. Please ensure that the number of caption prompts and videos match in your dataset."
476
+ )
477
+
478
+ self.instance_videos = self._preprocess_data()
479
+
480
+ def __len__(self):
481
+ return self.num_instance_videos
482
+
483
+ def __getitem__(self, index):
484
+ return {
485
+ "instance_prompt": self.instance_prompts[index],
486
+ "instance_video": self.instance_videos[index],
487
+ }
488
+
489
+ def _load_dataset_from_hub(self):
490
+ try:
491
+ from datasets import load_dataset
492
+ except ImportError:
493
+ raise ImportError(
494
+ "You are trying to load your data using the datasets library. If you wish to train using custom "
495
+ "captions please install the datasets library: `pip install datasets`. If you wish to load a "
496
+ "local folder containing images only, specify --instance_data_root instead."
497
+ )
498
+
499
+ # Downloading and loading a dataset from the hub. See more about loading custom images at
500
+ # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
501
+ dataset = load_dataset(
502
+ self.dataset_name,
503
+ self.dataset_config_name,
504
+ cache_dir=self.cache_dir,
505
+ )
506
+ column_names = dataset["train"].column_names
507
+
508
+ if self.video_column is None:
509
+ video_column = column_names[0]
510
+ logger.info(f"`video_column` defaulting to {video_column}")
511
+ else:
512
+ video_column = self.video_column
513
+ if video_column not in column_names:
514
+ raise ValueError(
515
+ f"`--video_column` value '{video_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
516
+ )
517
+
518
+ if self.caption_column is None:
519
+ caption_column = column_names[1]
520
+ logger.info(f"`caption_column` defaulting to {caption_column}")
521
+ else:
522
+ caption_column = self.caption_column
523
+ if self.caption_column not in column_names:
524
+ raise ValueError(
525
+ f"`--caption_column` value '{self.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
526
+ )
527
+
528
+ instance_prompts = dataset["train"][caption_column]
529
+ instance_videos = [Path(self.instance_data_root, filepath) for filepath in dataset["train"][video_column]]
530
+
531
+ return instance_prompts, instance_videos
532
+
533
+ def _load_dataset_from_local_path(self):
534
+ if not self.instance_data_root.exists():
535
+ raise ValueError("Instance videos root folder does not exist")
536
+
537
+ prompt_path = self.instance_data_root.joinpath(self.caption_column)
538
+ video_path = self.instance_data_root.joinpath(self.video_column)
539
+
540
+ if not prompt_path.exists() or not prompt_path.is_file():
541
+ raise ValueError(
542
+ "Expected `--caption_column` to be path to a file in `--instance_data_root` containing line-separated text prompts."
543
+ )
544
+ if not video_path.exists() or not video_path.is_file():
545
+ raise ValueError(
546
+ "Expected `--video_column` to be path to a file in `--instance_data_root` containing line-separated paths to video data in the same directory."
547
+ )
548
+
549
+ with open(prompt_path, "r", encoding="utf-8") as file:
550
+ instance_prompts = [line.strip() for line in file.readlines() if len(line.strip()) > 0]
551
+ with open(video_path, "r", encoding="utf-8") as file:
552
+ instance_videos = [
553
+ self.instance_data_root.joinpath(line.strip()) for line in file.readlines() if len(line.strip()) > 0
554
+ ]
555
+
556
+ if any(not path.is_file() for path in instance_videos):
557
+ raise ValueError(
558
+ "Expected '--video_column' to be a path to a file in `--instance_data_root` containing line-separated paths to video data but found at least one path that is not a valid file."
559
+ )
560
+
561
+ return instance_prompts, instance_videos
562
+
563
+ def _preprocess_data(self):
564
+ try:
565
+ import decord
566
+ except ImportError:
567
+ raise ImportError(
568
+ "The `decord` package is required for loading the video dataset. Install with `pip install decord`"
569
+ )
570
+
571
+ decord.bridge.set_bridge("torch")
572
+
573
+ videos = []
574
+ train_transforms = transforms.Compose(
575
+ [
576
+ transforms.Lambda(lambda x: x / 255.0 * 2.0 - 1.0),
577
+ ]
578
+ )
579
+
580
+ for filename in self.instance_video_paths:
581
+ video_reader = decord.VideoReader(uri=filename.as_posix(), width=self.width, height=self.height)
582
+ video_num_frames = len(video_reader)
583
+
584
+ start_frame = min(self.skip_frames_start, video_num_frames)
585
+ end_frame = max(0, video_num_frames - self.skip_frames_end)
586
+ if end_frame <= start_frame:
587
+ frames = video_reader.get_batch([start_frame])
588
+ elif end_frame - start_frame <= self.max_num_frames:
589
+ frames = video_reader.get_batch(list(range(start_frame, end_frame)))
590
+ else:
591
+ indices = list(range(start_frame, end_frame, (end_frame - start_frame) // self.max_num_frames))
592
+ frames = video_reader.get_batch(indices)
593
+
594
+ # Ensure that we don't go over the limit
595
+ frames = frames[: self.max_num_frames]
596
+ selected_num_frames = frames.shape[0]
597
+
598
+ # Choose first (4k + 1) frames as this is how many is required by the VAE
599
+ remainder = (3 + (selected_num_frames % 4)) % 4
600
+ if remainder != 0:
601
+ frames = frames[:-remainder]
602
+ selected_num_frames = frames.shape[0]
603
+
604
+ assert (selected_num_frames - 1) % 4 == 0
605
+
606
+ # Training transforms
607
+ frames = frames.float()
608
+ frames = torch.stack([train_transforms(frame) for frame in frames], dim=0)
609
+ videos.append(frames.permute(0, 3, 1, 2).contiguous()) # [F, C, H, W]
610
+
611
+ return videos
612
+
613
+
614
+ def save_model_card(
615
+ repo_id: str,
616
+ videos=None,
617
+ base_model: str = None,
618
+ validation_prompt=None,
619
+ repo_folder=None,
620
+ fps=8,
621
+ ):
622
+ widget_dict = []
623
+ if videos is not None:
624
+ for i, video in enumerate(videos):
625
+ video_path = f"final_video_{i}.mp4"
626
+ export_to_video(video, os.path.join(repo_folder, video_path), fps=fps)
627
+ widget_dict.append(
628
+ {"text": validation_prompt if validation_prompt else " ", "output": {"url": video_path}},
629
+ )
630
+
631
+ model_description = f"""
632
+ # CogVideoX LoRA - {repo_id}
633
+
634
+ <Gallery />
635
+
636
+ ## Model description
637
+
638
+ These are {repo_id} LoRA weights for {base_model}.
639
+
640
+ The weights were trained using the [CogVideoX Diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/cogvideo/train_cogvideox_image_to_video_lora.py).
641
+
642
+ Was LoRA for the text encoder enabled? No.
643
+
644
+ ## Download model
645
+
646
+ [Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab.
647
+
648
+ ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
649
+
650
+ ```py
651
+ import torch
652
+ from diffusers import CogVideoXImageToVideoPipeline
653
+ from diffusers.utils import load_image, export_to_video
654
+
655
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained("THUDM/CogVideoX-5b-I2V", torch_dtype=torch.bfloat16).to("cuda")
656
+ pipe.load_lora_weights("{repo_id}", weight_name="pytorch_lora_weights.safetensors", adapter_name="cogvideox-i2v-lora")
657
+
658
+ # The LoRA adapter weights are determined by what was used for training.
659
+ # In this case, we assume `--lora_alpha` is 32 and `--rank` is 64.
660
+ # It can be made lower or higher from what was used in training to decrease or amplify the effect
661
+ # of the LoRA upto a tolerance, beyond which one might notice no effect at all or overflows.
662
+ pipe.set_adapters(["cogvideox-i2v-lora"], [32 / 64])
663
+
664
+ image = load_image("/path/to/image")
665
+ video = pipe(image=image, prompt="{validation_prompt}", guidance_scale=6, use_dynamic_cfg=True).frames[0]
666
+ export_to_video(video, "output.mp4", fps=8)
667
+ ```
668
+
669
+ For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
670
+
671
+ ## License
672
+
673
+ Please adhere to the licensing terms as described [here](https://huggingface.co/THUDM/CogVideoX-5b-I2V/blob/main/LICENSE).
674
+ """
675
+ model_card = load_or_create_model_card(
676
+ repo_id_or_path=repo_id,
677
+ from_training=True,
678
+ license="other",
679
+ base_model=base_model,
680
+ prompt=validation_prompt,
681
+ model_description=model_description,
682
+ widget=widget_dict,
683
+ )
684
+ tags = [
685
+ "image-to-video",
686
+ "diffusers-training",
687
+ "diffusers",
688
+ "lora",
689
+ "cogvideox",
690
+ "cogvideox-diffusers",
691
+ "template:sd-lora",
692
+ ]
693
+
694
+ model_card = populate_model_card(model_card, tags=tags)
695
+ model_card.save(os.path.join(repo_folder, "README.md"))
696
+
697
+
698
+ def log_validation(
699
+ pipe,
700
+ args,
701
+ accelerator,
702
+ pipeline_args,
703
+ epoch,
704
+ is_final_validation: bool = False,
705
+ ):
706
+ logger.info(
707
+ f"Running validation... \n Generating {args.num_validation_videos} videos with prompt: {pipeline_args['prompt']}."
708
+ )
709
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
710
+ scheduler_args = {}
711
+
712
+ if "variance_type" in pipe.scheduler.config:
713
+ variance_type = pipe.scheduler.config.variance_type
714
+
715
+ if variance_type in ["learned", "learned_range"]:
716
+ variance_type = "fixed_small"
717
+
718
+ scheduler_args["variance_type"] = variance_type
719
+
720
+ pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
721
+ pipe = pipe.to(accelerator.device)
722
+ # pipe.set_progress_bar_config(disable=True)
723
+
724
+ # run inference
725
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None
726
+
727
+ videos = []
728
+ for _ in range(args.num_validation_videos):
729
+ video = pipe(**pipeline_args, generator=generator, output_type="np").frames[0]
730
+ videos.append(video)
731
+
732
+ for tracker in accelerator.trackers:
733
+ phase_name = "test" if is_final_validation else "validation"
734
+ if tracker.name == "wandb":
735
+ video_filenames = []
736
+ for i, video in enumerate(videos):
737
+ prompt = (
738
+ pipeline_args["prompt"][:25]
739
+ .replace(" ", "_")
740
+ .replace(" ", "_")
741
+ .replace("'", "_")
742
+ .replace('"', "_")
743
+ .replace("/", "_")
744
+ )
745
+ filename = os.path.join(args.output_dir, f"{phase_name}_video_{i}_{prompt}.mp4")
746
+ export_to_video(video, filename, fps=8)
747
+ video_filenames.append(filename)
748
+
749
+ tracker.log(
750
+ {
751
+ phase_name: [
752
+ wandb.Video(filename, caption=f"{i}: {pipeline_args['prompt']}")
753
+ for i, filename in enumerate(video_filenames)
754
+ ]
755
+ }
756
+ )
757
+
758
+ del pipe
759
+ free_memory()
760
+
761
+ return videos
762
+
763
+
764
+ def _get_t5_prompt_embeds(
765
+ tokenizer: T5Tokenizer,
766
+ text_encoder: T5EncoderModel,
767
+ prompt: Union[str, List[str]],
768
+ num_videos_per_prompt: int = 1,
769
+ max_sequence_length: int = 226,
770
+ device: Optional[torch.device] = None,
771
+ dtype: Optional[torch.dtype] = None,
772
+ text_input_ids=None,
773
+ ):
774
+ prompt = [prompt] if isinstance(prompt, str) else prompt
775
+ batch_size = len(prompt)
776
+
777
+ if tokenizer is not None:
778
+ text_inputs = tokenizer(
779
+ prompt,
780
+ padding="max_length",
781
+ max_length=max_sequence_length,
782
+ truncation=True,
783
+ add_special_tokens=True,
784
+ return_tensors="pt",
785
+ )
786
+ text_input_ids = text_inputs.input_ids
787
+ else:
788
+ if text_input_ids is None:
789
+ raise ValueError("`text_input_ids` must be provided when the tokenizer is not specified.")
790
+
791
+ prompt_embeds = text_encoder(text_input_ids.to(device))[0]
792
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
793
+
794
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
795
+ _, seq_len, _ = prompt_embeds.shape
796
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
797
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
798
+
799
+ return prompt_embeds
800
+
801
+
802
+ def encode_prompt(
803
+ tokenizer: T5Tokenizer,
804
+ text_encoder: T5EncoderModel,
805
+ prompt: Union[str, List[str]],
806
+ num_videos_per_prompt: int = 1,
807
+ max_sequence_length: int = 226,
808
+ device: Optional[torch.device] = None,
809
+ dtype: Optional[torch.dtype] = None,
810
+ text_input_ids=None,
811
+ ):
812
+ prompt = [prompt] if isinstance(prompt, str) else prompt
813
+ prompt_embeds = _get_t5_prompt_embeds(
814
+ tokenizer,
815
+ text_encoder,
816
+ prompt=prompt,
817
+ num_videos_per_prompt=num_videos_per_prompt,
818
+ max_sequence_length=max_sequence_length,
819
+ device=device,
820
+ dtype=dtype,
821
+ text_input_ids=text_input_ids,
822
+ )
823
+ return prompt_embeds
824
+
825
+
826
+ def compute_prompt_embeddings(
827
+ tokenizer, text_encoder, prompt, max_sequence_length, device, dtype, requires_grad: bool = False
828
+ ):
829
+ if requires_grad:
830
+ prompt_embeds = encode_prompt(
831
+ tokenizer,
832
+ text_encoder,
833
+ prompt,
834
+ num_videos_per_prompt=1,
835
+ max_sequence_length=max_sequence_length,
836
+ device=device,
837
+ dtype=dtype,
838
+ )
839
+ else:
840
+ with torch.no_grad():
841
+ prompt_embeds = encode_prompt(
842
+ tokenizer,
843
+ text_encoder,
844
+ prompt,
845
+ num_videos_per_prompt=1,
846
+ max_sequence_length=max_sequence_length,
847
+ device=device,
848
+ dtype=dtype,
849
+ )
850
+ return prompt_embeds
851
+
852
+
853
+ def prepare_rotary_positional_embeddings(
854
+ height: int,
855
+ width: int,
856
+ num_frames: int,
857
+ vae_scale_factor_spatial: int = 8,
858
+ patch_size: int = 2,
859
+ attention_head_dim: int = 64,
860
+ device: Optional[torch.device] = None,
861
+ base_height: int = 480,
862
+ base_width: int = 720,
863
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
864
+ grid_height = height // (vae_scale_factor_spatial * patch_size)
865
+ grid_width = width // (vae_scale_factor_spatial * patch_size)
866
+ base_size_width = base_width // (vae_scale_factor_spatial * patch_size)
867
+ base_size_height = base_height // (vae_scale_factor_spatial * patch_size)
868
+
869
+ grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size_width, base_size_height)
870
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
871
+ embed_dim=attention_head_dim,
872
+ crops_coords=grid_crops_coords,
873
+ grid_size=(grid_height, grid_width),
874
+ temporal_size=num_frames,
875
+ device=device,
876
+ )
877
+
878
+ return freqs_cos, freqs_sin
879
+
880
+
881
+ def get_optimizer(args, params_to_optimize, use_deepspeed: bool = False):
882
+ # Use DeepSpeed optimizer
883
+ if use_deepspeed:
884
+ from accelerate.utils import DummyOptim
885
+
886
+ return DummyOptim(
887
+ params_to_optimize,
888
+ lr=args.learning_rate,
889
+ betas=(args.adam_beta1, args.adam_beta2),
890
+ eps=args.adam_epsilon,
891
+ weight_decay=args.adam_weight_decay,
892
+ )
893
+
894
+ # Optimizer creation
895
+ supported_optimizers = ["adam", "adamw", "prodigy"]
896
+ if args.optimizer not in supported_optimizers:
897
+ logger.warning(
898
+ f"Unsupported choice of optimizer: {args.optimizer}. Supported optimizers include {supported_optimizers}. Defaulting to AdamW"
899
+ )
900
+ args.optimizer = "adamw"
901
+
902
+ if args.use_8bit_adam and args.optimizer.lower() not in ["adam", "adamw"]:
903
+ logger.warning(
904
+ f"use_8bit_adam is ignored when optimizer is not set to 'Adam' or 'AdamW'. Optimizer was "
905
+ f"set to {args.optimizer.lower()}"
906
+ )
907
+
908
+ if args.use_8bit_adam:
909
+ try:
910
+ import bitsandbytes as bnb
911
+ except ImportError:
912
+ raise ImportError(
913
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
914
+ )
915
+
916
+ if args.optimizer.lower() == "adamw":
917
+ optimizer_class = bnb.optim.AdamW8bit if args.use_8bit_adam else torch.optim.AdamW
918
+
919
+ optimizer = optimizer_class(
920
+ params_to_optimize,
921
+ betas=(args.adam_beta1, args.adam_beta2),
922
+ eps=args.adam_epsilon,
923
+ weight_decay=args.adam_weight_decay,
924
+ )
925
+ elif args.optimizer.lower() == "adam":
926
+ optimizer_class = bnb.optim.Adam8bit if args.use_8bit_adam else torch.optim.Adam
927
+
928
+ optimizer = optimizer_class(
929
+ params_to_optimize,
930
+ betas=(args.adam_beta1, args.adam_beta2),
931
+ eps=args.adam_epsilon,
932
+ weight_decay=args.adam_weight_decay,
933
+ )
934
+ elif args.optimizer.lower() == "prodigy":
935
+ try:
936
+ import prodigyopt
937
+ except ImportError:
938
+ raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`")
939
+
940
+ optimizer_class = prodigyopt.Prodigy
941
+
942
+ if args.learning_rate <= 0.1:
943
+ logger.warning(
944
+ "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
945
+ )
946
+
947
+ optimizer = optimizer_class(
948
+ params_to_optimize,
949
+ betas=(args.adam_beta1, args.adam_beta2),
950
+ beta3=args.prodigy_beta3,
951
+ weight_decay=args.adam_weight_decay,
952
+ eps=args.adam_epsilon,
953
+ decouple=args.prodigy_decouple,
954
+ use_bias_correction=args.prodigy_use_bias_correction,
955
+ safeguard_warmup=args.prodigy_safeguard_warmup,
956
+ )
957
+
958
+ return optimizer
959
+
960
+
961
+ def main(args):
962
+ if args.report_to == "wandb" and args.hub_token is not None:
963
+ raise ValueError(
964
+ "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
965
+ " Please use `hf auth login` to authenticate with the Hub."
966
+ )
967
+
968
+ if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
969
+ # due to pytorch#99272, MPS does not yet support bfloat16.
970
+ raise ValueError(
971
+ "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
972
+ )
973
+
974
+ logging_dir = Path(args.output_dir, args.logging_dir)
975
+
976
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
977
+ ddp_kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
978
+ init_kwargs = InitProcessGroupKwargs(backend="nccl", timeout=timedelta(seconds=args.nccl_timeout))
979
+ accelerator = Accelerator(
980
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
981
+ mixed_precision=args.mixed_precision,
982
+ log_with=args.report_to,
983
+ project_config=accelerator_project_config,
984
+ kwargs_handlers=[ddp_kwargs, init_kwargs],
985
+ )
986
+
987
+ # Disable AMP for MPS.
988
+ if torch.backends.mps.is_available():
989
+ accelerator.native_amp = False
990
+
991
+ if args.report_to == "wandb":
992
+ if not is_wandb_available():
993
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
994
+
995
+ # Make one log on every process with the configuration for debugging.
996
+ logging.basicConfig(
997
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
998
+ datefmt="%m/%d/%Y %H:%M:%S",
999
+ level=logging.INFO,
1000
+ )
1001
+ logger.info(accelerator.state, main_process_only=False)
1002
+ if accelerator.is_local_main_process:
1003
+ transformers.utils.logging.set_verbosity_warning()
1004
+ diffusers.utils.logging.set_verbosity_info()
1005
+ else:
1006
+ transformers.utils.logging.set_verbosity_error()
1007
+ diffusers.utils.logging.set_verbosity_error()
1008
+
1009
+ # If passed along, set the training seed now.
1010
+ if args.seed is not None:
1011
+ set_seed(args.seed)
1012
+
1013
+ # Handle the repository creation
1014
+ if accelerator.is_main_process:
1015
+ if args.output_dir is not None:
1016
+ os.makedirs(args.output_dir, exist_ok=True)
1017
+
1018
+ if args.push_to_hub:
1019
+ repo_id = create_repo(
1020
+ repo_id=args.hub_model_id or Path(args.output_dir).name,
1021
+ exist_ok=True,
1022
+ ).repo_id
1023
+
1024
+ # Prepare models and scheduler
1025
+ tokenizer = AutoTokenizer.from_pretrained(
1026
+ args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
1027
+ )
1028
+
1029
+ text_encoder = T5EncoderModel.from_pretrained(
1030
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
1031
+ )
1032
+
1033
+ # CogVideoX-2b weights are stored in float16
1034
+ # CogVideoX-5b and CogVideoX-5b-I2V weights are stored in bfloat16
1035
+ load_dtype = torch.bfloat16 if "5b" in args.pretrained_model_name_or_path.lower() else torch.float16
1036
+ transformer = CogVideoXTransformer3DModel.from_pretrained(
1037
+ args.pretrained_model_name_or_path,
1038
+ subfolder="transformer",
1039
+ torch_dtype=load_dtype,
1040
+ revision=args.revision,
1041
+ variant=args.variant,
1042
+ )
1043
+
1044
+ vae = AutoencoderKLCogVideoX.from_pretrained(
1045
+ args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
1046
+ )
1047
+
1048
+ scheduler = CogVideoXDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
1049
+
1050
+ if args.enable_slicing:
1051
+ vae.enable_slicing()
1052
+ if args.enable_tiling:
1053
+ vae.enable_tiling()
1054
+
1055
+ # We only train the additional adapter LoRA layers
1056
+ text_encoder.requires_grad_(False)
1057
+ transformer.requires_grad_(False)
1058
+ vae.requires_grad_(False)
1059
+
1060
+ # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision
1061
+ # as these weights are only used for inference, keeping weights in full precision is not required.
1062
+ weight_dtype = torch.float32
1063
+ if accelerator.state.deepspeed_plugin:
1064
+ # DeepSpeed is handling precision, use what's in the DeepSpeed config
1065
+ if (
1066
+ "fp16" in accelerator.state.deepspeed_plugin.deepspeed_config
1067
+ and accelerator.state.deepspeed_plugin.deepspeed_config["fp16"]["enabled"]
1068
+ ):
1069
+ weight_dtype = torch.float16
1070
+ if (
1071
+ "bf16" in accelerator.state.deepspeed_plugin.deepspeed_config
1072
+ and accelerator.state.deepspeed_plugin.deepspeed_config["bf16"]["enabled"]
1073
+ ):
1074
+ weight_dtype = torch.bfloat16
1075
+ else:
1076
+ if accelerator.mixed_precision == "fp16":
1077
+ weight_dtype = torch.float16
1078
+ elif accelerator.mixed_precision == "bf16":
1079
+ weight_dtype = torch.bfloat16
1080
+
1081
+ if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16:
1082
+ # due to pytorch#99272, MPS does not yet support bfloat16.
1083
+ raise ValueError(
1084
+ "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
1085
+ )
1086
+
1087
+ text_encoder.to(accelerator.device, dtype=weight_dtype)
1088
+ transformer.to(accelerator.device, dtype=weight_dtype)
1089
+ vae.to(accelerator.device, dtype=weight_dtype)
1090
+
1091
+ if args.gradient_checkpointing:
1092
+ transformer.enable_gradient_checkpointing()
1093
+
1094
+ # now we will add new LoRA weights to the attention layers
1095
+ transformer_lora_config = LoraConfig(
1096
+ r=args.rank,
1097
+ lora_alpha=args.lora_alpha,
1098
+ init_lora_weights=True,
1099
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
1100
+ )
1101
+ transformer.add_adapter(transformer_lora_config)
1102
+
1103
+ def unwrap_model(model):
1104
+ model = accelerator.unwrap_model(model)
1105
+ model = model._orig_mod if is_compiled_module(model) else model
1106
+ return model
1107
+
1108
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
1109
+ def save_model_hook(models, weights, output_dir):
1110
+ if accelerator.is_main_process:
1111
+ transformer_lora_layers_to_save = None
1112
+
1113
+ for model in models:
1114
+ if isinstance(model, type(unwrap_model(transformer))):
1115
+ transformer_lora_layers_to_save = get_peft_model_state_dict(model)
1116
+ else:
1117
+ raise ValueError(f"unexpected save model: {model.__class__}")
1118
+
1119
+ # make sure to pop weight so that corresponding model is not saved again
1120
+ weights.pop()
1121
+
1122
+ CogVideoXImageToVideoPipeline.save_lora_weights(
1123
+ output_dir,
1124
+ transformer_lora_layers=transformer_lora_layers_to_save,
1125
+ )
1126
+
1127
+ def load_model_hook(models, input_dir):
1128
+ transformer_ = None
1129
+
1130
+ while len(models) > 0:
1131
+ model = models.pop()
1132
+
1133
+ if isinstance(model, type(unwrap_model(transformer))):
1134
+ transformer_ = model
1135
+ else:
1136
+ raise ValueError(f"Unexpected save model: {model.__class__}")
1137
+
1138
+ lora_state_dict = CogVideoXImageToVideoPipeline.lora_state_dict(input_dir)
1139
+
1140
+ transformer_state_dict = {
1141
+ f"{k.replace('transformer.', '')}": v for k, v in lora_state_dict.items() if k.startswith("transformer.")
1142
+ }
1143
+ transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict)
1144
+ incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default")
1145
+ if incompatible_keys is not None:
1146
+ # check only for unexpected keys
1147
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
1148
+ if unexpected_keys:
1149
+ logger.warning(
1150
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
1151
+ f" {unexpected_keys}. "
1152
+ )
1153
+
1154
+ # Make sure the trainable params are in float32. This is again needed since the base models
1155
+ # are in `weight_dtype`. More details:
1156
+ # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
1157
+ if args.mixed_precision == "fp16":
1158
+ # only upcast trainable parameters (LoRA) into fp32
1159
+ cast_training_params([transformer_])
1160
+
1161
+ accelerator.register_save_state_pre_hook(save_model_hook)
1162
+ accelerator.register_load_state_pre_hook(load_model_hook)
1163
+
1164
+ # Enable TF32 for faster training on Ampere GPUs,
1165
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
1166
+ if args.allow_tf32 and torch.cuda.is_available():
1167
+ torch.backends.cuda.matmul.allow_tf32 = True
1168
+
1169
+ if args.scale_lr:
1170
+ args.learning_rate = (
1171
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
1172
+ )
1173
+
1174
+ # Make sure the trainable params are in float32.
1175
+ if args.mixed_precision == "fp16":
1176
+ # only upcast trainable parameters (LoRA) into fp32
1177
+ cast_training_params([transformer], dtype=torch.float32)
1178
+
1179
+ transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters()))
1180
+
1181
+ # Optimization parameters
1182
+ transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate}
1183
+ params_to_optimize = [transformer_parameters_with_lr]
1184
+
1185
+ use_deepspeed_optimizer = (
1186
+ accelerator.state.deepspeed_plugin is not None
1187
+ and "optimizer" in accelerator.state.deepspeed_plugin.deepspeed_config
1188
+ )
1189
+ use_deepspeed_scheduler = (
1190
+ accelerator.state.deepspeed_plugin is not None
1191
+ and "scheduler" in accelerator.state.deepspeed_plugin.deepspeed_config
1192
+ )
1193
+
1194
+ optimizer = get_optimizer(args, params_to_optimize, use_deepspeed=use_deepspeed_optimizer)
1195
+
1196
+ # Dataset and DataLoader
1197
+ train_dataset = VideoDataset(
1198
+ instance_data_root=args.instance_data_root,
1199
+ dataset_name=args.dataset_name,
1200
+ dataset_config_name=args.dataset_config_name,
1201
+ caption_column=args.caption_column,
1202
+ video_column=args.video_column,
1203
+ height=args.height,
1204
+ width=args.width,
1205
+ fps=args.fps,
1206
+ max_num_frames=args.max_num_frames,
1207
+ skip_frames_start=args.skip_frames_start,
1208
+ skip_frames_end=args.skip_frames_end,
1209
+ cache_dir=args.cache_dir,
1210
+ id_token=args.id_token,
1211
+ )
1212
+
1213
+ def encode_video(video):
1214
+ video = video.to(accelerator.device, dtype=vae.dtype).unsqueeze(0)
1215
+ video = video.permute(0, 2, 1, 3, 4) # [B, C, F, H, W]
1216
+ image = video[:, :, :1].clone()
1217
+
1218
+ latent_dist = vae.encode(video).latent_dist
1219
+
1220
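+ # Lightly noise the conditioning image before VAE encoding (sigma sampled as exp of N(-3.0, 0.5)) to augment the image condition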
+ image_noise_sigma = torch.normal(mean=-3.0, std=0.5, size=(1,), device=image.device)
1221
+ image_noise_sigma = torch.exp(image_noise_sigma).to(dtype=image.dtype)
1222
+ noisy_image = image + torch.randn_like(image) * image_noise_sigma[:, None, None, None, None]
1223
+ image_latent_dist = vae.encode(noisy_image).latent_dist
1224
+
1225
+ return latent_dist, image_latent_dist
1226
+
1227
+ train_dataset.instance_prompts = [
1228
+ compute_prompt_embeddings(
1229
+ tokenizer,
1230
+ text_encoder,
1231
+ [prompt],
1232
+ transformer.config.max_text_seq_length,
1233
+ accelerator.device,
1234
+ weight_dtype,
1235
+ requires_grad=False,
1236
+ )
1237
+ for prompt in train_dataset.instance_prompts
1238
+ ]
1239
+ train_dataset.instance_videos = [encode_video(video) for video in train_dataset.instance_videos]
1240
+
1241
+ def collate_fn(examples):
1242
+ videos = []
1243
+ images = []
1244
+ for example in examples:
1245
+ latent_dist, image_latent_dist = example["instance_video"]
1246
+
1247
+ video_latents = latent_dist.sample() * vae.config.scaling_factor
1248
+ image_latents = image_latent_dist.sample() * vae.config.scaling_factor
1249
+ video_latents = video_latents.permute(0, 2, 1, 3, 4)
1250
+ image_latents = image_latents.permute(0, 2, 1, 3, 4)
1251
+
1252
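+ # Zero-pad the single-frame image latents along the temporal dimension so they match the video latents' frame count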
+ padding_shape = (video_latents.shape[0], video_latents.shape[1] - 1, *video_latents.shape[2:])
1253
+ latent_padding = image_latents.new_zeros(padding_shape)
1254
+ image_latents = torch.cat([image_latents, latent_padding], dim=1)
1255
+
1256
+ if random.random() < args.noised_image_dropout:
1257
+ image_latents = torch.zeros_like(image_latents)
1258
+
1259
+ videos.append(video_latents)
1260
+ images.append(image_latents)
1261
+
1262
+ videos = torch.cat(videos)
1263
+ images = torch.cat(images)
1264
+ videos = videos.to(memory_format=torch.contiguous_format).float()
1265
+ images = images.to(memory_format=torch.contiguous_format).float()
1266
+
1267
+ prompts = [example["instance_prompt"] for example in examples]
1268
+ prompts = torch.cat(prompts)
1269
+
1270
+ return {
1271
+ "videos": (videos, images),
1272
+ "prompts": prompts,
1273
+ }
1274
+
1275
+ train_dataloader = DataLoader(
1276
+ train_dataset,
1277
+ batch_size=args.train_batch_size,
1278
+ shuffle=True,
1279
+ collate_fn=collate_fn,
1280
+ num_workers=args.dataloader_num_workers,
1281
+ )
1282
+
1283
+ # Scheduler and math around the number of training steps.
1284
+ overrode_max_train_steps = False
1285
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1286
+ if args.max_train_steps is None:
1287
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1288
+ overrode_max_train_steps = True
1289
+
1290
+ if use_deepspeed_scheduler:
1291
+ from accelerate.utils import DummyScheduler
1292
+
1293
+ lr_scheduler = DummyScheduler(
1294
+ name=args.lr_scheduler,
1295
+ optimizer=optimizer,
1296
+ total_num_steps=args.max_train_steps * accelerator.num_processes,
1297
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1298
+ )
1299
+ else:
1300
+ lr_scheduler = get_scheduler(
1301
+ args.lr_scheduler,
1302
+ optimizer=optimizer,
1303
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1304
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
1305
+ num_cycles=args.lr_num_cycles,
1306
+ power=args.lr_power,
1307
+ )
1308
+
1309
+ # Prepare everything with our `accelerator`.
1310
+ transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1311
+ transformer, optimizer, train_dataloader, lr_scheduler
1312
+ )
1313
+
1314
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1315
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1316
+ if overrode_max_train_steps:
1317
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1318
+ # Afterwards we recalculate our number of training epochs
1319
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1320
+
1321
+ # We need to initialize the trackers we use, and also store our configuration.
1322
+ # The trackers initialize automatically on the main process.
1323
+ if accelerator.is_main_process:
1324
+ tracker_name = args.tracker_name or "cogvideox-i2v-lora"
1325
+ accelerator.init_trackers(tracker_name, config=vars(args))
1326
+
1327
+ # Train!
1328
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1329
+ num_trainable_parameters = sum(param.numel() for model in params_to_optimize for param in model["params"])
1330
+
1331
+ logger.info("***** Running training *****")
1332
+ logger.info(f" Num trainable parameters = {num_trainable_parameters}")
1333
+ logger.info(f" Num examples = {len(train_dataset)}")
1334
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1335
+ logger.info(f" Num epochs = {args.num_train_epochs}")
1336
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1337
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1338
+ logger.info(f" Gradient accumulation steps = {args.gradient_accumulation_steps}")
1339
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1340
+ global_step = 0
1341
+ first_epoch = 0
1342
+
1343
+ # Potentially load in the weights and states from a previous save
1344
+ if not args.resume_from_checkpoint:
1345
+ initial_global_step = 0
1346
+ else:
1347
+ if args.resume_from_checkpoint != "latest":
1348
+ path = os.path.basename(args.resume_from_checkpoint)
1349
+ else:
1350
+ # Get the most recent checkpoint
1351
+ dirs = os.listdir(args.output_dir)
1352
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1353
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1354
+ path = dirs[-1] if len(dirs) > 0 else None
1355
+
1356
+ if path is None:
1357
+ accelerator.print(
1358
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1359
+ )
1360
+ args.resume_from_checkpoint = None
1361
+ initial_global_step = 0
1362
+ else:
1363
+ accelerator.print(f"Resuming from checkpoint {path}")
1364
+ accelerator.load_state(os.path.join(args.output_dir, path))
1365
+ global_step = int(path.split("-")[1])
1366
+
1367
+ initial_global_step = global_step
1368
+ first_epoch = global_step // num_update_steps_per_epoch
1369
+
1370
+ progress_bar = tqdm(
1371
+ range(0, args.max_train_steps),
1372
+ initial=initial_global_step,
1373
+ desc="Steps",
1374
+ # Only show the progress bar once on each machine.
1375
+ disable=not accelerator.is_local_main_process,
1376
+ )
1377
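+ # Spatial downscaling factor of the VAE (each downsampling stage halves the resolution)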
+ vae_scale_factor_spatial = 2 ** (len(vae.config.block_out_channels) - 1)
1378
+
1379
+ # For DeepSpeed training
1380
+ model_config = transformer.module.config if hasattr(transformer, "module") else transformer.config
1381
+
1382
+ for epoch in range(first_epoch, args.num_train_epochs):
1383
+ transformer.train()
1384
+
1385
+ for step, batch in enumerate(train_dataloader):
1386
+ models_to_accumulate = [transformer]
1387
+
1388
+ with accelerator.accumulate(models_to_accumulate):
1389
+ video_latents, image_latents = batch["videos"]
1390
+ prompt_embeds = batch["prompts"]
1391
+
1392
+ video_latents = video_latents.to(dtype=weight_dtype) # [B, F, C, H, W]
1393
+ image_latents = image_latents.to(dtype=weight_dtype) # [B, F, C, H, W]
1394
+
1395
+ batch_size, num_frames, num_channels, height, width = video_latents.shape
1396
+
1397
+ # Sample a random timestep for each sample in the batch
1398
+ timesteps = torch.randint(
1399
+ 0, scheduler.config.num_train_timesteps, (batch_size,), device=video_latents.device
1400
+ )
1401
+ timesteps = timesteps.long()
1402
+
1403
+ # Sample noise that will be added to the latents
1404
+ noise = torch.randn_like(video_latents)
1405
+
1406
+ # Add noise to the model input according to the noise magnitude at each timestep
1407
+ # (this is the forward diffusion process)
1408
+ noisy_video_latents = scheduler.add_noise(video_latents, noise, timesteps)
1409
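+ # Concatenate the image-conditioning latents with the noisy video latents along the channel dimension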
+ noisy_model_input = torch.cat([noisy_video_latents, image_latents], dim=2)
1410
+
1411
+ # Prepare rotary embeds
1412
+ image_rotary_emb = (
1413
+ prepare_rotary_positional_embeddings(
1414
+ height=args.height,
1415
+ width=args.width,
1416
+ num_frames=num_frames,
1417
+ vae_scale_factor_spatial=vae_scale_factor_spatial,
1418
+ patch_size=model_config.patch_size,
1419
+ attention_head_dim=model_config.attention_head_dim,
1420
+ device=accelerator.device,
1421
+ )
1422
+ if model_config.use_rotary_positional_embeddings
1423
+ else None
1424
+ )
1425
+
1426
+ # Predict the noise residual
1427
+ model_output = transformer(
1428
+ hidden_states=noisy_model_input,
1429
+ encoder_hidden_states=prompt_embeds,
1430
+ timestep=timesteps,
1431
+ image_rotary_emb=image_rotary_emb,
1432
+ return_dict=False,
1433
+ )[0]
1434
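+ # Convert the transformer output via the scheduler's velocity formula and regress it towards the clean
+ # video latents, weighting each sample by 1 / (1 - alpha_cumprod[t])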
+ model_pred = scheduler.get_velocity(model_output, noisy_video_latents, timesteps)
1435
+
1436
+ alphas_cumprod = scheduler.alphas_cumprod[timesteps]
1437
+ weights = 1 / (1 - alphas_cumprod)
1438
+ while len(weights.shape) < len(model_pred.shape):
1439
+ weights = weights.unsqueeze(-1)
1440
+
1441
+ target = video_latents
1442
+
1443
+ loss = torch.mean((weights * (model_pred - target) ** 2).reshape(batch_size, -1), dim=1)
1444
+ loss = loss.mean()
1445
+ accelerator.backward(loss)
1446
+
1447
+ if accelerator.sync_gradients:
1448
+ params_to_clip = transformer.parameters()
1449
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1450
+
1451
+ if accelerator.state.deepspeed_plugin is None:
1452
+ optimizer.step()
1453
+ optimizer.zero_grad()
1454
+
1455
+ lr_scheduler.step()
1456
+
1457
+ # Checks if the accelerator has performed an optimization step behind the scenes
1458
+ if accelerator.sync_gradients:
1459
+ progress_bar.update(1)
1460
+ global_step += 1
1461
+
1462
+ if accelerator.is_main_process or accelerator.distributed_type == DistributedType.DEEPSPEED:
1463
+ if global_step % args.checkpointing_steps == 0:
1464
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1465
+ if args.checkpoints_total_limit is not None:
1466
+ checkpoints = os.listdir(args.output_dir)
1467
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1468
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1469
+
1470
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1471
+ if len(checkpoints) >= args.checkpoints_total_limit:
1472
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1473
+ removing_checkpoints = checkpoints[0:num_to_remove]
1474
+
1475
+ logger.info(
1476
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1477
+ )
1478
+ logger.info(f"Removing checkpoints: {', '.join(removing_checkpoints)}")
1479
+
1480
+ for removing_checkpoint in removing_checkpoints:
1481
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1482
+ shutil.rmtree(removing_checkpoint)
1483
+
1484
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1485
+ accelerator.save_state(save_path)
1486
+ logger.info(f"Saved state to {save_path}")
1487
+
1488
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1489
+ progress_bar.set_postfix(**logs)
1490
+ accelerator.log(logs, step=global_step)
1491
+
1492
+ if global_step >= args.max_train_steps:
1493
+ break
1494
+
1495
+ if accelerator.is_main_process:
1496
+ if args.validation_prompt is not None and (epoch + 1) % args.validation_epochs == 0:
1497
+ # Create pipeline
1498
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
1499
+ args.pretrained_model_name_or_path,
1500
+ transformer=unwrap_model(transformer),
1501
+ scheduler=scheduler,
1502
+ revision=args.revision,
1503
+ variant=args.variant,
1504
+ torch_dtype=weight_dtype,
1505
+ )
1506
+
1507
+ validation_prompts = args.validation_prompt.split(args.validation_prompt_separator)
1508
+ validation_images = args.validation_images.split(args.validation_prompt_separator)
1509
+
1510
+ for validation_image, validation_prompt in zip(validation_images, validation_prompts):
1511
+ pipeline_args = {
1512
+ "image": load_image(validation_image),
1513
+ "prompt": validation_prompt,
1514
+ "guidance_scale": args.guidance_scale,
1515
+ "use_dynamic_cfg": args.use_dynamic_cfg,
1516
+ "height": args.height,
1517
+ "width": args.width,
1518
+ }
1519
+
1520
+ validation_outputs = log_validation(
1521
+ pipe=pipe,
1522
+ args=args,
1523
+ accelerator=accelerator,
1524
+ pipeline_args=pipeline_args,
1525
+ epoch=epoch,
1526
+ )
1527
+
1528
+ # Save the lora layers
1529
+ accelerator.wait_for_everyone()
1530
+ if accelerator.is_main_process:
1531
+ transformer = unwrap_model(transformer)
1532
+ dtype = (
1533
+ torch.float16
1534
+ if args.mixed_precision == "fp16"
1535
+ else torch.bfloat16
1536
+ if args.mixed_precision == "bf16"
1537
+ else torch.float32
1538
+ )
1539
+ transformer = transformer.to(dtype)
1540
+ transformer_lora_layers = get_peft_model_state_dict(transformer)
1541
+
1542
+ CogVideoXImageToVideoPipeline.save_lora_weights(
1543
+ save_directory=args.output_dir,
1544
+ transformer_lora_layers=transformer_lora_layers,
1545
+ )
1546
+
1547
+ # Cleanup trained models to save memory
1548
+ del transformer
1549
+ free_memory()
1550
+
1551
+ # Final test inference
1552
+ pipe = CogVideoXImageToVideoPipeline.from_pretrained(
1553
+ args.pretrained_model_name_or_path,
1554
+ revision=args.revision,
1555
+ variant=args.variant,
1556
+ torch_dtype=weight_dtype,
1557
+ )
1558
+ pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config)
1559
+
1560
+ if args.enable_slicing:
1561
+ pipe.vae.enable_slicing()
1562
+ if args.enable_tiling:
1563
+ pipe.vae.enable_tiling()
1564
+
1565
+ # Load LoRA weights
1566
+ lora_scaling = args.lora_alpha / args.rank
1567
+ pipe.load_lora_weights(args.output_dir, adapter_name="cogvideox-i2v-lora")
1568
+ pipe.set_adapters(["cogvideox-i2v-lora"], [lora_scaling])
1569
+
1570
+ # Run inference
1571
+ validation_outputs = []
1572
+ if args.validation_prompt and args.num_validation_videos > 0:
1573
+ validation_prompts = args.validation_prompt.split(args.validation_prompt_separator)
1574
+ validation_images = args.validation_images.split(args.validation_prompt_separator)
1575
+
1576
+ for validation_image, validation_prompt in zip(validation_images, validation_prompts):
1577
+ pipeline_args = {
1578
+ "image": load_image(validation_image),
1579
+ "prompt": validation_prompt,
1580
+ "guidance_scale": args.guidance_scale,
1581
+ "use_dynamic_cfg": args.use_dynamic_cfg,
1582
+ "height": args.height,
1583
+ "width": args.width,
1584
+ }
1585
+
1586
+ video = log_validation(
1587
+ pipe=pipe,
1588
+ args=args,
1589
+ accelerator=accelerator,
1590
+ pipeline_args=pipeline_args,
1591
+ epoch=epoch,
1592
+ is_final_validation=True,
1593
+ )
1594
+ validation_outputs.extend(video)
1595
+
1596
+ if args.push_to_hub:
1597
+ validation_prompt = args.validation_prompt or ""
1598
+ validation_prompt = validation_prompt.split(args.validation_prompt_separator)[0]
1599
+ save_model_card(
1600
+ repo_id,
1601
+ videos=validation_outputs,
1602
+ base_model=args.pretrained_model_name_or_path,
1603
+ validation_prompt=validation_prompt,
1604
+ repo_folder=args.output_dir,
1605
+ fps=args.fps,
1606
+ )
1607
+ upload_folder(
1608
+ repo_id=repo_id,
1609
+ folder_path=args.output_dir,
1610
+ commit_message="End of training",
1611
+ ignore_patterns=["step_*", "epoch_*"],
1612
+ )
1613
+
1614
+ accelerator.end_training()
1615
+
1616
+
1617
+ if __name__ == "__main__":
1618
+ args = get_args()
1619
+ main(args)
exp_code/1_benchmark/diffusers-WanS2V/examples/cogvideo/train_cogvideox_lora.py ADDED
@@ -0,0 +1,1607 @@
1
+ # Copyright 2025 The HuggingFace Team.
2
+ # All rights reserved.
3
+ #
4
+ # Licensed under the Apache License, Version 2.0 (the "License");
5
+ # you may not use this file except in compliance with the License.
6
+ # You may obtain a copy of the License at
7
+ #
8
+ # http://www.apache.org/licenses/LICENSE-2.0
9
+ #
10
+ # Unless required by applicable law or agreed to in writing, software
11
+ # distributed under the License is distributed on an "AS IS" BASIS,
12
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13
+ # See the License for the specific language governing permissions and
14
+ # limitations under the License.
15
+
16
+ import argparse
17
+ import logging
18
+ import math
19
+ import os
20
+ import shutil
21
+ from pathlib import Path
22
+ from typing import List, Optional, Tuple, Union
23
+
24
+ import numpy as np
25
+ import torch
26
+ import torchvision.transforms as TT
27
+ import transformers
28
+ from accelerate import Accelerator, DistributedType
29
+ from accelerate.logging import get_logger
30
+ from accelerate.utils import DistributedDataParallelKwargs, ProjectConfiguration, set_seed
31
+ from huggingface_hub import create_repo, upload_folder
32
+ from peft import LoraConfig, get_peft_model_state_dict, set_peft_model_state_dict
33
+ from torch.utils.data import DataLoader, Dataset
34
+ from torchvision.transforms import InterpolationMode
35
+ from torchvision.transforms.functional import resize
36
+ from tqdm.auto import tqdm
37
+ from transformers import AutoTokenizer, T5EncoderModel, T5Tokenizer
38
+
39
+ import diffusers
40
+ from diffusers import AutoencoderKLCogVideoX, CogVideoXDPMScheduler, CogVideoXPipeline, CogVideoXTransformer3DModel
41
+ from diffusers.image_processor import VaeImageProcessor
42
+ from diffusers.models.embeddings import get_3d_rotary_pos_embed
43
+ from diffusers.optimization import get_scheduler
44
+ from diffusers.pipelines.cogvideo.pipeline_cogvideox import get_resize_crop_region_for_grid
45
+ from diffusers.training_utils import cast_training_params, free_memory
46
+ from diffusers.utils import check_min_version, convert_unet_state_dict_to_peft, export_to_video, is_wandb_available
47
+ from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
48
+ from diffusers.utils.torch_utils import is_compiled_module
49
+
50
+
51
+ if is_wandb_available():
52
+ import wandb
53
+
54
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
55
+ check_min_version("0.36.0.dev0")
56
+
57
+ logger = get_logger(__name__)
58
+
59
+
60
+ def get_args():
61
+ parser = argparse.ArgumentParser(description="Simple example of a training script for CogVideoX.")
62
+
63
+ # Model information
64
+ parser.add_argument(
65
+ "--pretrained_model_name_or_path",
66
+ type=str,
67
+ default=None,
68
+ required=True,
69
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
70
+ )
71
+ parser.add_argument(
72
+ "--revision",
73
+ type=str,
74
+ default=None,
75
+ required=False,
76
+ help="Revision of pretrained model identifier from huggingface.co/models.",
77
+ )
78
+ parser.add_argument(
79
+ "--variant",
80
+ type=str,
81
+ default=None,
82
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
83
+ )
84
+ parser.add_argument(
85
+ "--cache_dir",
86
+ type=str,
87
+ default=None,
88
+ help="The directory where the downloaded models and datasets will be stored.",
89
+ )
90
+
91
+ # Dataset information
92
+ parser.add_argument(
93
+ "--dataset_name",
94
+ type=str,
95
+ default=None,
96
+ help=(
97
+ "The name of the Dataset (from the HuggingFace hub) containing the training data of instance images (could be your own, possibly private,"
98
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
99
+ " or to a folder containing files that 🤗 Datasets can understand."
100
+ ),
101
+ )
102
+ parser.add_argument(
103
+ "--dataset_config_name",
104
+ type=str,
105
+ default=None,
106
+ help="The config of the Dataset, leave as None if there's only one config.",
107
+ )
108
+ parser.add_argument(
109
+ "--instance_data_root",
110
+ type=str,
111
+ default=None,
112
+ help=("A folder containing the training data."),
113
+ )
114
+ parser.add_argument(
115
+ "--video_column",
116
+ type=str,
117
+ default="video",
118
+ help="The column of the dataset containing videos. Or, the name of the file in `--instance_data_root` folder containing the line-separated path to video data.",
119
+ )
120
+ parser.add_argument(
121
+ "--caption_column",
122
+ type=str,
123
+ default="text",
124
+ help="The column of the dataset containing the instance prompt for each video. Or, the name of the file in `--instance_data_root` folder containing the line-separated instance prompts.",
125
+ )
126
+ parser.add_argument(
127
+ "--id_token", type=str, default=None, help="Identifier token appended to the start of each prompt if provided."
128
+ )
129
+ parser.add_argument(
130
+ "--dataloader_num_workers",
131
+ type=int,
132
+ default=0,
133
+ help=(
134
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
135
+ ),
136
+ )
137
+
138
+ # Validation
139
+ parser.add_argument(
140
+ "--validation_prompt",
141
+ type=str,
142
+ default=None,
143
+ help="One or more prompt(s) that is used during validation to verify that the model is learning. Multiple validation prompts should be separated by the '--validation_prompt_seperator' string.",
144
+ )
145
+ parser.add_argument(
146
+ "--validation_prompt_separator",
147
+ type=str,
148
+ default=":::",
149
+ help="String that separates multiple validation prompts",
150
+ )
151
+ parser.add_argument(
152
+ "--num_validation_videos",
153
+ type=int,
154
+ default=1,
155
+ help="Number of videos that should be generated during validation per `validation_prompt`.",
156
+ )
157
+ parser.add_argument(
158
+ "--validation_epochs",
159
+ type=int,
160
+ default=50,
161
+ help=(
162
+ "Run validation every X epochs. Validation consists of running the prompt `args.validation_prompt` multiple times: `args.num_validation_videos`."
163
+ ),
164
+ )
165
+ parser.add_argument(
166
+ "--guidance_scale",
167
+ type=float,
168
+ default=6,
169
+ help="The guidance scale to use while sampling validation videos.",
170
+ )
171
+ parser.add_argument(
172
+ "--use_dynamic_cfg",
173
+ action="store_true",
174
+ default=False,
175
+ help="Whether or not to use the default cosine dynamic guidance schedule when sampling validation videos.",
176
+ )
177
+
178
+ # Training information
179
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
180
+ parser.add_argument(
181
+ "--rank",
182
+ type=int,
183
+ default=128,
184
+ help=("The dimension of the LoRA update matrices."),
185
+ )
186
+ parser.add_argument(
187
+ "--lora_alpha",
188
+ type=float,
189
+ default=128,
190
+ help=("The scaling factor to scale LoRA weight update. The actual scaling factor is `lora_alpha / rank`"),
191
+ )
192
+ parser.add_argument(
193
+ "--mixed_precision",
194
+ type=str,
195
+ default=None,
196
+ choices=["no", "fp16", "bf16"],
197
+ help=(
198
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
199
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
200
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
201
+ ),
202
+ )
203
+ parser.add_argument(
204
+ "--output_dir",
205
+ type=str,
206
+ default="cogvideox-lora",
207
+ help="The output directory where the model predictions and checkpoints will be written.",
208
+ )
209
+ parser.add_argument(
210
+ "--height",
211
+ type=int,
212
+ default=480,
213
+ help="All input videos are resized to this height.",
214
+ )
215
+ parser.add_argument(
216
+ "--width",
217
+ type=int,
218
+ default=720,
219
+ help="All input videos are resized to this width.",
220
+ )
221
+ parser.add_argument(
222
+ "--video_reshape_mode",
223
+ type=str,
224
+ default="center",
225
+ help="All input videos are reshaped to this mode. Choose between ['center', 'random', 'none']",
226
+ )
227
+ parser.add_argument("--fps", type=int, default=8, help="All input videos will be used at this FPS.")
228
+ parser.add_argument(
229
+ "--max_num_frames", type=int, default=49, help="All input videos will be truncated to these many frames."
230
+ )
231
+ parser.add_argument(
232
+ "--skip_frames_start",
233
+ type=int,
234
+ default=0,
235
+ help="Number of frames to skip from the beginning of each input video. Useful if training data contains intro sequences.",
236
+ )
237
+ parser.add_argument(
238
+ "--skip_frames_end",
239
+ type=int,
240
+ default=0,
241
+ help="Number of frames to skip from the end of each input video. Useful if training data contains outro sequences.",
242
+ )
243
+ parser.add_argument(
244
+ "--random_flip",
245
+ action="store_true",
246
+ help="whether to randomly flip videos horizontally",
247
+ )
248
+ parser.add_argument(
249
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
250
+ )
251
+ parser.add_argument("--num_train_epochs", type=int, default=1)
252
+ parser.add_argument(
253
+ "--max_train_steps",
254
+ type=int,
255
+ default=None,
256
+ help="Total number of training steps to perform. If provided, overrides `--num_train_epochs`.",
257
+ )
258
+ parser.add_argument(
259
+ "--checkpointing_steps",
260
+ type=int,
261
+ default=500,
262
+ help=(
263
+ "Save a checkpoint of the training state every X updates. These checkpoints can be used both as final"
264
+ " checkpoints in case they are better than the last checkpoint, and are also suitable for resuming"
265
+ " training using `--resume_from_checkpoint`."
266
+ ),
267
+ )
268
+ parser.add_argument(
269
+ "--checkpoints_total_limit",
270
+ type=int,
271
+ default=None,
272
+ help=("Max number of checkpoints to store."),
273
+ )
274
+ parser.add_argument(
275
+ "--resume_from_checkpoint",
276
+ type=str,
277
+ default=None,
278
+ help=(
279
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
280
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
281
+ ),
282
+ )
283
+ parser.add_argument(
284
+ "--gradient_accumulation_steps",
285
+ type=int,
286
+ default=1,
287
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
288
+ )
289
+ parser.add_argument(
290
+ "--gradient_checkpointing",
291
+ action="store_true",
292
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
293
+ )
294
+ parser.add_argument(
295
+ "--learning_rate",
296
+ type=float,
297
+ default=1e-4,
298
+ help="Initial learning rate (after the potential warmup period) to use.",
299
+ )
300
+ parser.add_argument(
301
+ "--scale_lr",
302
+ action="store_true",
303
+ default=False,
304
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
305
+ )
306
+ parser.add_argument(
307
+ "--lr_scheduler",
308
+ type=str,
309
+ default="constant",
310
+ help=(
311
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
312
+ ' "constant", "constant_with_warmup"]'
313
+ ),
314
+ )
315
+ parser.add_argument(
316
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
317
+ )
318
+ parser.add_argument(
319
+ "--lr_num_cycles",
320
+ type=int,
321
+ default=1,
322
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
323
+ )
324
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
325
+ parser.add_argument(
326
+ "--enable_slicing",
327
+ action="store_true",
328
+ default=False,
329
+ help="Whether or not to use VAE slicing for saving memory.",
330
+ )
331
+ parser.add_argument(
332
+ "--enable_tiling",
333
+ action="store_true",
334
+ default=False,
335
+ help="Whether or not to use VAE tiling for saving memory.",
336
+ )
337
+
338
+ # Optimizer
339
+ parser.add_argument(
340
+ "--optimizer",
341
+ type=lambda s: s.lower(),
342
+ default="adam",
343
+ choices=["adam", "adamw", "prodigy"],
344
+ help=("The optimizer type to use."),
345
+ )
346
+ parser.add_argument(
347
+ "--use_8bit_adam",
348
+ action="store_true",
349
+ help="Whether or not to use 8-bit Adam from bitsandbytes. Ignored if optimizer is not set to AdamW",
350
+ )
351
+ parser.add_argument(
352
+ "--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam and Prodigy optimizers."
353
+ )
354
+ parser.add_argument(
355
+ "--adam_beta2", type=float, default=0.95, help="The beta2 parameter for the Adam and Prodigy optimizers."
356
+ )
357
+ parser.add_argument(
358
+ "--prodigy_beta3",
359
+ type=float,
360
+ default=None,
361
+ help="Coefficients for computing the Prodigy optimizer's stepsize using running averages. If set to None, uses the value of square root of beta2.",
362
+ )
363
+ parser.add_argument("--prodigy_decouple", action="store_true", help="Use AdamW style decoupled weight decay")
364
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-04, help="Weight decay to use for unet params")
365
+ parser.add_argument(
366
+ "--adam_epsilon",
367
+ type=float,
368
+ default=1e-08,
369
+ help="Epsilon value for the Adam optimizer and Prodigy optimizers.",
370
+ )
371
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
372
+ parser.add_argument("--prodigy_use_bias_correction", action="store_true", help="Turn on Adam's bias correction.")
373
+ parser.add_argument(
374
+ "--prodigy_safeguard_warmup",
375
+ action="store_true",
376
+ help="Remove lr from the denominator of D estimate to avoid issues during warm-up stage.",
377
+ )
378
+
379
+ # Other information
380
+ parser.add_argument("--tracker_name", type=str, default=None, help="Project tracker name")
381
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
382
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
383
+ parser.add_argument(
384
+ "--hub_model_id",
385
+ type=str,
386
+ default=None,
387
+ help="The name of the repository to keep in sync with the local `output_dir`.",
388
+ )
389
+ parser.add_argument(
390
+ "--logging_dir",
391
+ type=str,
392
+ default="logs",
393
+ help="Directory where logs are stored.",
394
+ )
395
+ parser.add_argument(
396
+ "--allow_tf32",
397
+ action="store_true",
398
+ help=(
399
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
400
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
401
+ ),
402
+ )
403
+ parser.add_argument(
404
+ "--report_to",
405
+ type=str,
406
+ default=None,
407
+ help=(
408
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
409
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
410
+ ),
411
+ )
412
+
413
+ return parser.parse_args()
414
+
415
+
416
+ class VideoDataset(Dataset):
417
+ def __init__(
418
+ self,
419
+ instance_data_root: Optional[str] = None,
420
+ dataset_name: Optional[str] = None,
421
+ dataset_config_name: Optional[str] = None,
422
+ caption_column: str = "text",
423
+ video_column: str = "video",
424
+ height: int = 480,
425
+ width: int = 720,
426
+ video_reshape_mode: str = "center",
427
+ fps: int = 8,
428
+ max_num_frames: int = 49,
429
+ skip_frames_start: int = 0,
430
+ skip_frames_end: int = 0,
431
+ cache_dir: Optional[str] = None,
432
+ id_token: Optional[str] = None,
433
+ ) -> None:
434
+ super().__init__()
435
+
436
+ self.instance_data_root = Path(instance_data_root) if instance_data_root is not None else None
437
+ self.dataset_name = dataset_name
438
+ self.dataset_config_name = dataset_config_name
439
+ self.caption_column = caption_column
440
+ self.video_column = video_column
441
+ self.height = height
442
+ self.width = width
443
+ self.video_reshape_mode = video_reshape_mode
444
+ self.fps = fps
445
+ self.max_num_frames = max_num_frames
446
+ self.skip_frames_start = skip_frames_start
447
+ self.skip_frames_end = skip_frames_end
448
+ self.cache_dir = cache_dir
449
+ self.id_token = id_token or ""
450
+
451
+ if dataset_name is not None:
452
+ self.instance_prompts, self.instance_video_paths = self._load_dataset_from_hub()
453
+ else:
454
+ self.instance_prompts, self.instance_video_paths = self._load_dataset_from_local_path()
455
+
456
+ self.num_instance_videos = len(self.instance_video_paths)
457
+ if self.num_instance_videos != len(self.instance_prompts):
458
+ raise ValueError(
459
+ f"Expected length of instance prompts and videos to be the same but found {len(self.instance_prompts)=} and {len(self.instance_video_paths)=}. Please ensure that the number of caption prompts and videos match in your dataset."
460
+ )
461
+
462
+ self.instance_videos = self._preprocess_data()
463
+
464
+ def __len__(self):
465
+ return self.num_instance_videos
466
+
467
+ def __getitem__(self, index):
468
+ return {
469
+ "instance_prompt": self.id_token + self.instance_prompts[index],
470
+ "instance_video": self.instance_videos[index],
471
+ }
472
+
473
+ def _load_dataset_from_hub(self):
474
+ try:
475
+ from datasets import load_dataset
476
+ except ImportError:
477
+ raise ImportError(
478
+ "You are trying to load your data using the datasets library. If you wish to train using custom "
479
+ "captions please install the datasets library: `pip install datasets`. If you wish to load a "
480
+ "local folder containing images only, specify --instance_data_root instead."
481
+ )
482
+
483
+ # Downloading and loading a dataset from the hub. See more about loading custom images at
484
+ # https://huggingface.co/docs/datasets/v2.0.0/en/dataset_script
485
+ dataset = load_dataset(
486
+ self.dataset_name,
487
+ self.dataset_config_name,
488
+ cache_dir=self.cache_dir,
489
+ )
490
+ column_names = dataset["train"].column_names
491
+
492
+ if self.video_column is None:
493
+ video_column = column_names[0]
494
+ logger.info(f"`video_column` defaulting to {video_column}")
495
+ else:
496
+ video_column = self.video_column
497
+ if video_column not in column_names:
498
+ raise ValueError(
499
+ f"`--video_column` value '{video_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
500
+ )
501
+
502
+ if self.caption_column is None:
503
+ caption_column = column_names[1]
504
+ logger.info(f"`caption_column` defaulting to {caption_column}")
505
+ else:
506
+ caption_column = self.caption_column
507
+ if self.caption_column not in column_names:
508
+ raise ValueError(
509
+ f"`--caption_column` value '{self.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
510
+ )
511
+
512
+ instance_prompts = dataset["train"][caption_column]
513
+ instance_videos = [Path(self.instance_data_root, filepath) for filepath in dataset["train"][video_column]]
514
+
515
+ return instance_prompts, instance_videos
516
+
517
+ def _load_dataset_from_local_path(self):
518
+ if not self.instance_data_root.exists():
519
+ raise ValueError("Instance videos root folder does not exist")
520
+
521
+ prompt_path = self.instance_data_root.joinpath(self.caption_column)
522
+ video_path = self.instance_data_root.joinpath(self.video_column)
523
+
524
+ if not prompt_path.exists() or not prompt_path.is_file():
525
+ raise ValueError(
526
+ "Expected `--caption_column` to be path to a file in `--instance_data_root` containing line-separated text prompts."
527
+ )
528
+ if not video_path.exists() or not video_path.is_file():
529
+ raise ValueError(
530
+ "Expected `--video_column` to be path to a file in `--instance_data_root` containing line-separated paths to video data in the same directory."
531
+ )
532
+
533
+ with open(prompt_path, "r", encoding="utf-8") as file:
534
+ instance_prompts = [line.strip() for line in file.readlines() if len(line.strip()) > 0]
535
+ with open(video_path, "r", encoding="utf-8") as file:
536
+ instance_videos = [
537
+ self.instance_data_root.joinpath(line.strip()) for line in file.readlines() if len(line.strip()) > 0
538
+ ]
539
+
540
+ if any(not path.is_file() for path in instance_videos):
541
+ raise ValueError(
542
+ "Expected '--video_column' to be a path to a file in `--instance_data_root` containing line-separated paths to video data but found at least one path that is not a valid file."
543
+ )
544
+
545
+ return instance_prompts, instance_videos
546
+
547
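+ # Resize the clip so the target (height, width) rectangle fits while preserving aspect ratio, then
+ # crop to exactly that size (centered, or at a random offset depending on `video_reshape_mode`)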
+ def _resize_for_rectangle_crop(self, arr):
548
+ image_size = self.height, self.width
549
+ reshape_mode = self.video_reshape_mode
550
+ if arr.shape[3] / arr.shape[2] > image_size[1] / image_size[0]:
551
+ arr = resize(
552
+ arr,
553
+ size=[image_size[0], int(arr.shape[3] * image_size[0] / arr.shape[2])],
554
+ interpolation=InterpolationMode.BICUBIC,
555
+ )
556
+ else:
557
+ arr = resize(
558
+ arr,
559
+ size=[int(arr.shape[2] * image_size[1] / arr.shape[3]), image_size[1]],
560
+ interpolation=InterpolationMode.BICUBIC,
561
+ )
562
+
563
+ h, w = arr.shape[2], arr.shape[3]
564
+ arr = arr.squeeze(0)
565
+
566
+ delta_h = h - image_size[0]
567
+ delta_w = w - image_size[1]
568
+
569
+ if reshape_mode == "random" or reshape_mode == "none":
570
+ top = np.random.randint(0, delta_h + 1)
571
+ left = np.random.randint(0, delta_w + 1)
572
+ elif reshape_mode == "center":
573
+ top, left = delta_h // 2, delta_w // 2
574
+ else:
575
+ raise NotImplementedError
576
+ arr = TT.functional.crop(arr, top=top, left=left, height=image_size[0], width=image_size[1])
577
+ return arr
578
+
579
+ def _preprocess_data(self):
580
+ try:
581
+ import decord
582
+ except ImportError:
583
+ raise ImportError(
584
+ "The `decord` package is required for loading the video dataset. Install with `pip install decord`"
585
+ )
586
+
587
+ decord.bridge.set_bridge("torch")
588
+
589
+ progress_dataset_bar = tqdm(
590
+ range(0, len(self.instance_video_paths)),
591
+ desc="Loading progress resize and crop videos",
592
+ )
593
+ videos = []
594
+
595
+ for filename in self.instance_video_paths:
596
+ video_reader = decord.VideoReader(uri=filename.as_posix())
597
+ video_num_frames = len(video_reader)
598
+
599
+ start_frame = min(self.skip_frames_start, video_num_frames)
600
+ end_frame = max(0, video_num_frames - self.skip_frames_end)
601
+ if end_frame <= start_frame:
602
+ frames = video_reader.get_batch([start_frame])
603
+ elif end_frame - start_frame <= self.max_num_frames:
604
+ frames = video_reader.get_batch(list(range(start_frame, end_frame)))
605
+ else:
606
+ indices = list(range(start_frame, end_frame, (end_frame - start_frame) // self.max_num_frames))
607
+ frames = video_reader.get_batch(indices)
608
+
609
+ # Ensure that we don't go over the limit
610
+ frames = frames[: self.max_num_frames]
611
+ selected_num_frames = frames.shape[0]
612
+
613
+ # Keep only the first (4k + 1) frames, since that is the frame count the VAE requires
614
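+ # Drop trailing frames so that (num_frames - 1) is divisible by 4, matching the VAE's temporal compression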
+ remainder = (3 + (selected_num_frames % 4)) % 4
615
+ if remainder != 0:
616
+ frames = frames[:-remainder]
617
+ selected_num_frames = frames.shape[0]
618
+
619
+ assert (selected_num_frames - 1) % 4 == 0
620
+
621
+ # Training transforms
622
+ frames = (frames - 127.5) / 127.5
623
+ frames = frames.permute(0, 3, 1, 2) # [F, C, H, W]
624
+ progress_dataset_bar.set_description(
625
+ f"Loading progress Resizing video from {frames.shape[2]}x{frames.shape[3]} to {self.height}x{self.width}"
626
+ )
627
+ frames = self._resize_for_rectangle_crop(frames)
628
+ videos.append(frames.contiguous()) # [F, C, H, W]
629
+ progress_dataset_bar.update(1)
630
+
631
+ progress_dataset_bar.close()
632
+ return videos
633
+
634
+
635
+ def save_model_card(
636
+ repo_id: str,
637
+ videos=None,
638
+ base_model: str = None,
639
+ validation_prompt=None,
640
+ repo_folder=None,
641
+ fps=8,
642
+ ):
643
+ widget_dict = []
644
+ if videos is not None:
645
+ for i, video in enumerate(videos):
646
+ export_to_video(video, os.path.join(repo_folder, f"final_video_{i}.mp4"), fps=fps)
647
+ widget_dict.append(
648
+ {"text": validation_prompt if validation_prompt else " ", "output": {"url": f"video_{i}.mp4"}}
649
+ )
650
+
651
+ model_description = f"""
652
+ # CogVideoX LoRA - {repo_id}
653
+
654
+ <Gallery />
655
+
656
+ ## Model description
657
+
658
+ These are {repo_id} LoRA weights for {base_model}.
659
+
660
+ The weights were trained using the [CogVideoX Diffusers trainer](https://github.com/huggingface/diffusers/blob/main/examples/cogvideo/train_cogvideox_lora.py).
661
+
662
+ Was LoRA for the text encoder enabled? No.
663
+
664
+ ## Download model
665
+
666
+ [Download the *.safetensors LoRA]({repo_id}/tree/main) in the Files & versions tab.
667
+
668
+ ## Use it with the [🧨 diffusers library](https://github.com/huggingface/diffusers)
669
+
670
+ ```py
671
+ from diffusers import CogVideoXPipeline
672
+ import torch
673
+
674
+ pipe = CogVideoXPipeline.from_pretrained("THUDM/CogVideoX-5b", torch_dtype=torch.bfloat16).to("cuda")
675
+ pipe.load_lora_weights("{repo_id}", weight_name="pytorch_lora_weights.safetensors", adapter_name=["cogvideox-lora"])
676
+
677
+ # The LoRA adapter weights are determined by what was used for training.
678
+ # In this case, we assume `--lora_alpha` is 32 and `--rank` is 64.
679
+ # It can be made lower or higher from what was used in training to decrease or amplify the effect
680
+ # of the LoRA upto a tolerance, beyond which one might notice no effect at all or overflows.
681
+ pipe.set_adapters(["cogvideox-lora"], [32 / 64])
682
+
683
+ video = pipe("{validation_prompt}", guidance_scale=6, use_dynamic_cfg=True).frames[0]
684
+ ```
685
+
686
+ For more details, including weighting, merging and fusing LoRAs, check the [documentation on loading LoRAs in diffusers](https://huggingface.co/docs/diffusers/main/en/using-diffusers/loading_adapters)
687
+
688
+ ## License
689
+
690
+ Please adhere to the licensing terms as described [here](https://huggingface.co/THUDM/CogVideoX-5b/blob/main/LICENSE) and [here](https://huggingface.co/THUDM/CogVideoX-2b/blob/main/LICENSE).
691
+ """
692
+ model_card = load_or_create_model_card(
693
+ repo_id_or_path=repo_id,
694
+ from_training=True,
695
+ license="other",
696
+ base_model=base_model,
697
+ prompt=validation_prompt,
698
+ model_description=model_description,
699
+ widget=widget_dict,
700
+ )
701
+ tags = [
702
+ "text-to-video",
703
+ "diffusers-training",
704
+ "diffusers",
705
+ "lora",
706
+ "cogvideox",
707
+ "cogvideox-diffusers",
708
+ "template:sd-lora",
709
+ ]
710
+
711
+ model_card = populate_model_card(model_card, tags=tags)
712
+ model_card.save(os.path.join(repo_folder, "README.md"))
713
+
714
+
715
+ def log_validation(
716
+ pipe,
717
+ args,
718
+ accelerator,
719
+ pipeline_args,
720
+ epoch,
721
+ is_final_validation: bool = False,
722
+ ):
723
+ logger.info(
724
+ f"Running validation... \n Generating {args.num_validation_videos} videos with prompt: {pipeline_args['prompt']}."
725
+ )
726
+ # We train on the simplified learning objective. If we were previously predicting a variance, we need the scheduler to ignore it
727
+ scheduler_args = {}
728
+
729
+ if "variance_type" in pipe.scheduler.config:
730
+ variance_type = pipe.scheduler.config.variance_type
731
+
732
+ if variance_type in ["learned", "learned_range"]:
733
+ variance_type = "fixed_small"
734
+
735
+ scheduler_args["variance_type"] = variance_type
736
+
737
+ pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config, **scheduler_args)
738
+ pipe = pipe.to(accelerator.device)
739
+ # pipe.set_progress_bar_config(disable=True)
740
+
741
+ # run inference
742
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed) if args.seed is not None else None
743
+
744
+ videos = []
745
+ for _ in range(args.num_validation_videos):
746
+ pt_images = pipe(**pipeline_args, generator=generator, output_type="pt").frames[0]
747
+ pt_images = torch.stack([pt_images[i] for i in range(pt_images.shape[0])])
748
+
749
+ image_np = VaeImageProcessor.pt_to_numpy(pt_images)
750
+ image_pil = VaeImageProcessor.numpy_to_pil(image_np)
751
+
752
+ videos.append(image_pil)
753
+
754
+ for tracker in accelerator.trackers:
755
+ phase_name = "test" if is_final_validation else "validation"
756
+ if tracker.name == "wandb":
757
+ video_filenames = []
758
+ for i, video in enumerate(videos):
759
+ prompt = (
760
+ pipeline_args["prompt"][:25]
761
+ .replace(" ", "_")
762
+ .replace(" ", "_")
763
+ .replace("'", "_")
764
+ .replace('"', "_")
765
+ .replace("/", "_")
766
+ )
767
+ filename = os.path.join(args.output_dir, f"{phase_name}_video_{i}_{prompt}.mp4")
768
+ export_to_video(video, filename, fps=8)
769
+ video_filenames.append(filename)
770
+
771
+ tracker.log(
772
+ {
773
+ phase_name: [
774
+ wandb.Video(filename, caption=f"{i}: {pipeline_args['prompt']}")
775
+ for i, filename in enumerate(video_filenames)
776
+ ]
777
+ }
778
+ )
779
+
780
+ del pipe
781
+ free_memory()
782
+
783
+ return videos
784
+
785
+
786
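+ # Tokenize the prompt(s) to a fixed `max_sequence_length` and return the T5 encoder hidden states,
+ # repeated `num_videos_per_prompt` times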
+ def _get_t5_prompt_embeds(
787
+ tokenizer: T5Tokenizer,
788
+ text_encoder: T5EncoderModel,
789
+ prompt: Union[str, List[str]],
790
+ num_videos_per_prompt: int = 1,
791
+ max_sequence_length: int = 226,
792
+ device: Optional[torch.device] = None,
793
+ dtype: Optional[torch.dtype] = None,
794
+ text_input_ids=None,
795
+ ):
796
+ prompt = [prompt] if isinstance(prompt, str) else prompt
797
+ batch_size = len(prompt)
798
+
799
+ if tokenizer is not None:
800
+ text_inputs = tokenizer(
801
+ prompt,
802
+ padding="max_length",
803
+ max_length=max_sequence_length,
804
+ truncation=True,
805
+ add_special_tokens=True,
806
+ return_tensors="pt",
807
+ )
808
+ text_input_ids = text_inputs.input_ids
809
+ else:
810
+ if text_input_ids is None:
811
+ raise ValueError("`text_input_ids` must be provided when the tokenizer is not specified.")
812
+
813
+ prompt_embeds = text_encoder(text_input_ids.to(device))[0]
814
+ prompt_embeds = prompt_embeds.to(dtype=dtype, device=device)
815
+
816
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
817
+ _, seq_len, _ = prompt_embeds.shape
818
+ prompt_embeds = prompt_embeds.repeat(1, num_videos_per_prompt, 1)
819
+ prompt_embeds = prompt_embeds.view(batch_size * num_videos_per_prompt, seq_len, -1)
820
+
821
+ return prompt_embeds
822
+
823
+
824
+ def encode_prompt(
825
+ tokenizer: T5Tokenizer,
826
+ text_encoder: T5EncoderModel,
827
+ prompt: Union[str, List[str]],
828
+ num_videos_per_prompt: int = 1,
829
+ max_sequence_length: int = 226,
830
+ device: Optional[torch.device] = None,
831
+ dtype: Optional[torch.dtype] = None,
832
+ text_input_ids=None,
833
+ ):
834
+ prompt = [prompt] if isinstance(prompt, str) else prompt
835
+ prompt_embeds = _get_t5_prompt_embeds(
836
+ tokenizer,
837
+ text_encoder,
838
+ prompt=prompt,
839
+ num_videos_per_prompt=num_videos_per_prompt,
840
+ max_sequence_length=max_sequence_length,
841
+ device=device,
842
+ dtype=dtype,
843
+ text_input_ids=text_input_ids,
844
+ )
845
+ return prompt_embeds
846
+
847
+
848
+ def compute_prompt_embeddings(
849
+ tokenizer, text_encoder, prompt, max_sequence_length, device, dtype, requires_grad: bool = False
850
+ ):
851
+ if requires_grad:
852
+ prompt_embeds = encode_prompt(
853
+ tokenizer,
854
+ text_encoder,
855
+ prompt,
856
+ num_videos_per_prompt=1,
857
+ max_sequence_length=max_sequence_length,
858
+ device=device,
859
+ dtype=dtype,
860
+ )
861
+ else:
862
+ with torch.no_grad():
863
+ prompt_embeds = encode_prompt(
864
+ tokenizer,
865
+ text_encoder,
866
+ prompt,
867
+ num_videos_per_prompt=1,
868
+ max_sequence_length=max_sequence_length,
869
+ device=device,
870
+ dtype=dtype,
871
+ )
872
+ return prompt_embeds
873
+
874
+
875
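+ # Build the cos/sin tables for 3D rotary positional embeddings over the latent patch grid; spatial sizes
+ # are divided by (vae_scale_factor_spatial * patch_size) and the temporal axis spans the latent frames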
+ def prepare_rotary_positional_embeddings(
876
+ height: int,
877
+ width: int,
878
+ num_frames: int,
879
+ vae_scale_factor_spatial: int = 8,
880
+ patch_size: int = 2,
881
+ attention_head_dim: int = 64,
882
+ device: Optional[torch.device] = None,
883
+ base_height: int = 480,
884
+ base_width: int = 720,
885
+ ) -> Tuple[torch.Tensor, torch.Tensor]:
886
+ grid_height = height // (vae_scale_factor_spatial * patch_size)
887
+ grid_width = width // (vae_scale_factor_spatial * patch_size)
888
+ base_size_width = base_width // (vae_scale_factor_spatial * patch_size)
889
+ base_size_height = base_height // (vae_scale_factor_spatial * patch_size)
890
+
891
+ grid_crops_coords = get_resize_crop_region_for_grid((grid_height, grid_width), base_size_width, base_size_height)
892
+ freqs_cos, freqs_sin = get_3d_rotary_pos_embed(
893
+ embed_dim=attention_head_dim,
894
+ crops_coords=grid_crops_coords,
895
+ grid_size=(grid_height, grid_width),
896
+ temporal_size=num_frames,
897
+ device=device,
898
+ )
899
+
900
+ return freqs_cos, freqs_sin
901
+
902
+
903
+ def get_optimizer(args, params_to_optimize, use_deepspeed: bool = False):
904
+ # Use DeepSpeed optimizer
905
+ if use_deepspeed:
906
+ from accelerate.utils import DummyOptim
907
+
908
+ return DummyOptim(
909
+ params_to_optimize,
910
+ lr=args.learning_rate,
911
+ betas=(args.adam_beta1, args.adam_beta2),
912
+ eps=args.adam_epsilon,
913
+ weight_decay=args.adam_weight_decay,
914
+ )
915
+
916
+ # Optimizer creation
917
+ supported_optimizers = ["adam", "adamw", "prodigy"]
918
+ if args.optimizer not in supported_optimizers:
919
+ logger.warning(
920
+ f"Unsupported choice of optimizer: {args.optimizer}. Supported optimizers include {supported_optimizers}. Defaulting to AdamW"
921
+ )
922
+ args.optimizer = "adamw"
923
+
924
+ if args.use_8bit_adam and args.optimizer.lower() not in ["adam", "adamw"]:
925
+ logger.warning(
926
+ f"use_8bit_adam is ignored when optimizer is not set to 'Adam' or 'AdamW'. Optimizer was "
927
+ f"set to {args.optimizer.lower()}"
928
+ )
929
+
930
+ if args.use_8bit_adam:
931
+ try:
932
+ import bitsandbytes as bnb
933
+ except ImportError:
934
+ raise ImportError(
935
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
936
+ )
937
+
938
+ if args.optimizer.lower() == "adamw":
939
+ optimizer_class = bnb.optim.AdamW8bit if args.use_8bit_adam else torch.optim.AdamW
940
+
941
+ optimizer = optimizer_class(
942
+ params_to_optimize,
943
+ betas=(args.adam_beta1, args.adam_beta2),
944
+ eps=args.adam_epsilon,
945
+ weight_decay=args.adam_weight_decay,
946
+ )
947
+ elif args.optimizer.lower() == "adam":
948
+ optimizer_class = bnb.optim.Adam8bit if args.use_8bit_adam else torch.optim.Adam
949
+
950
+ optimizer = optimizer_class(
951
+ params_to_optimize,
952
+ betas=(args.adam_beta1, args.adam_beta2),
953
+ eps=args.adam_epsilon,
954
+ weight_decay=args.adam_weight_decay,
955
+ )
956
+ elif args.optimizer.lower() == "prodigy":
957
+ try:
958
+ import prodigyopt
959
+ except ImportError:
960
+ raise ImportError("To use Prodigy, please install the prodigyopt library: `pip install prodigyopt`")
961
+
962
+ optimizer_class = prodigyopt.Prodigy
963
+
964
+ if args.learning_rate <= 0.1:
965
+ logger.warning(
966
+ "Learning rate is too low. When using prodigy, it's generally better to set learning rate around 1.0"
967
+ )
968
+
969
+ optimizer = optimizer_class(
970
+ params_to_optimize,
971
+ betas=(args.adam_beta1, args.adam_beta2),
972
+ beta3=args.prodigy_beta3,
973
+ weight_decay=args.adam_weight_decay,
974
+ eps=args.adam_epsilon,
975
+ decouple=args.prodigy_decouple,
976
+ use_bias_correction=args.prodigy_use_bias_correction,
977
+ safeguard_warmup=args.prodigy_safeguard_warmup,
978
+ )
979
+
980
+ return optimizer
981
+
982
+
983
+ def main(args):
984
+ if args.report_to == "wandb" and args.hub_token is not None:
985
+ raise ValueError(
986
+ "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
987
+ " Please use `hf auth login` to authenticate with the Hub."
988
+ )
989
+
990
+ if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
991
+ # due to pytorch#99272, MPS does not yet support bfloat16.
992
+ raise ValueError(
993
+ "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
994
+ )
995
+
996
+ logging_dir = Path(args.output_dir, args.logging_dir)
997
+
998
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=logging_dir)
999
+ kwargs = DistributedDataParallelKwargs(find_unused_parameters=True)
1000
+ accelerator = Accelerator(
1001
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
1002
+ mixed_precision=args.mixed_precision,
1003
+ log_with=args.report_to,
1004
+ project_config=accelerator_project_config,
1005
+ kwargs_handlers=[kwargs],
1006
+ )
1007
+
1008
+ # Disable AMP for MPS.
1009
+ if torch.backends.mps.is_available():
1010
+ accelerator.native_amp = False
1011
+
1012
+ if args.report_to == "wandb":
1013
+ if not is_wandb_available():
1014
+ raise ImportError("Make sure to install wandb if you want to use it for logging during training.")
1015
+
1016
+ # Make one log on every process with the configuration for debugging.
1017
+ logging.basicConfig(
1018
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
1019
+ datefmt="%m/%d/%Y %H:%M:%S",
1020
+ level=logging.INFO,
1021
+ )
1022
+ logger.info(accelerator.state, main_process_only=False)
1023
+ if accelerator.is_local_main_process:
1024
+ transformers.utils.logging.set_verbosity_warning()
1025
+ diffusers.utils.logging.set_verbosity_info()
1026
+ else:
1027
+ transformers.utils.logging.set_verbosity_error()
1028
+ diffusers.utils.logging.set_verbosity_error()
1029
+
1030
+ # If passed along, set the training seed now.
1031
+ if args.seed is not None:
1032
+ set_seed(args.seed)
1033
+
1034
+ # Handle the repository creation
1035
+ if accelerator.is_main_process:
1036
+ if args.output_dir is not None:
1037
+ os.makedirs(args.output_dir, exist_ok=True)
1038
+
1039
+ if args.push_to_hub:
1040
+ repo_id = create_repo(
1041
+ repo_id=args.hub_model_id or Path(args.output_dir).name,
1042
+ exist_ok=True,
1043
+ ).repo_id
1044
+
1045
+ # Prepare models and scheduler
1046
+ tokenizer = AutoTokenizer.from_pretrained(
1047
+ args.pretrained_model_name_or_path, subfolder="tokenizer", revision=args.revision
1048
+ )
1049
+
1050
+ text_encoder = T5EncoderModel.from_pretrained(
1051
+ args.pretrained_model_name_or_path, subfolder="text_encoder", revision=args.revision
1052
+ )
1053
+
1054
+ # CogVideoX-2b weights are stored in float16
1055
+ # CogVideoX-5b and CogVideoX-5b-I2V weights are stored in bfloat16
1056
+ load_dtype = torch.bfloat16 if "5b" in args.pretrained_model_name_or_path.lower() else torch.float16
1057
+ transformer = CogVideoXTransformer3DModel.from_pretrained(
1058
+ args.pretrained_model_name_or_path,
1059
+ subfolder="transformer",
1060
+ torch_dtype=load_dtype,
1061
+ revision=args.revision,
1062
+ variant=args.variant,
1063
+ )
1064
+
1065
+ vae = AutoencoderKLCogVideoX.from_pretrained(
1066
+ args.pretrained_model_name_or_path, subfolder="vae", revision=args.revision, variant=args.variant
1067
+ )
1068
+
1069
+ scheduler = CogVideoXDPMScheduler.from_pretrained(args.pretrained_model_name_or_path, subfolder="scheduler")
1070
+
1071
+ if args.enable_slicing:
1072
+ vae.enable_slicing()
1073
+ if args.enable_tiling:
1074
+ vae.enable_tiling()
1075
+
1076
+ # We only train the additional adapter LoRA layers
1077
+ text_encoder.requires_grad_(False)
1078
+ transformer.requires_grad_(False)
1079
+ vae.requires_grad_(False)
1080
+
1081
+ # For mixed precision training we cast all non-trainable weights (vae, text_encoder and transformer) to half-precision
1082
+ # as these weights are only used for inference, keeping weights in full precision is not required.
1083
+ weight_dtype = torch.float32
1084
+ if accelerator.state.deepspeed_plugin:
1085
+ # DeepSpeed is handling precision, use what's in the DeepSpeed config
1086
+ if (
1087
+ "fp16" in accelerator.state.deepspeed_plugin.deepspeed_config
1088
+ and accelerator.state.deepspeed_plugin.deepspeed_config["fp16"]["enabled"]
1089
+ ):
1090
+ weight_dtype = torch.float16
1091
+ if (
1092
+ "bf16" in accelerator.state.deepspeed_plugin.deepspeed_config
1093
+ and accelerator.state.deepspeed_plugin.deepspeed_config["bf16"]["enabled"]
1094
+ ):
1095
+ weight_dtype = torch.bfloat16
1096
+ else:
1097
+ if accelerator.mixed_precision == "fp16":
1098
+ weight_dtype = torch.float16
1099
+ elif accelerator.mixed_precision == "bf16":
1100
+ weight_dtype = torch.bfloat16
1101
+
1102
+ if torch.backends.mps.is_available() and weight_dtype == torch.bfloat16:
1103
+ # due to pytorch#99272, MPS does not yet support bfloat16.
1104
+ raise ValueError(
1105
+ "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
1106
+ )
1107
+
1108
+ text_encoder.to(accelerator.device, dtype=weight_dtype)
1109
+ transformer.to(accelerator.device, dtype=weight_dtype)
1110
+ vae.to(accelerator.device, dtype=weight_dtype)
1111
+
1112
+ if args.gradient_checkpointing:
1113
+ transformer.enable_gradient_checkpointing()
1114
+
1115
+ # now we will add new LoRA weights to the attention layers
1116
+ transformer_lora_config = LoraConfig(
1117
+ r=args.rank,
1118
+ lora_alpha=args.lora_alpha,
1119
+ init_lora_weights=True,
1120
+ target_modules=["to_k", "to_q", "to_v", "to_out.0"],
1121
+ )
1122
+ transformer.add_adapter(transformer_lora_config)
1123
+
1124
+ def unwrap_model(model):
1125
+ model = accelerator.unwrap_model(model)
1126
+ model = model._orig_mod if is_compiled_module(model) else model
1127
+ return model
1128
+
1129
+ # create custom saving & loading hooks so that `accelerator.save_state(...)` serializes in a nice format
1130
+ def save_model_hook(models, weights, output_dir):
1131
+ if accelerator.is_main_process:
1132
+ transformer_lora_layers_to_save = None
1133
+
1134
+ for model in models:
1135
+ if isinstance(model, type(unwrap_model(transformer))):
1136
+ transformer_lora_layers_to_save = get_peft_model_state_dict(model)
1137
+ else:
1138
+ raise ValueError(f"unexpected save model: {model.__class__}")
1139
+
1140
+ # make sure to pop weight so that corresponding model is not saved again
1141
+ weights.pop()
1142
+
1143
+ CogVideoXPipeline.save_lora_weights(
1144
+ output_dir,
1145
+ transformer_lora_layers=transformer_lora_layers_to_save,
1146
+ )
1147
+
1148
+ def load_model_hook(models, input_dir):
1149
+ transformer_ = None
1150
+
1151
+ while len(models) > 0:
1152
+ model = models.pop()
1153
+
1154
+ if isinstance(model, type(unwrap_model(transformer))):
1155
+ transformer_ = model
1156
+ else:
1157
+ raise ValueError(f"Unexpected save model: {model.__class__}")
1158
+
1159
+ lora_state_dict = CogVideoXPipeline.lora_state_dict(input_dir)
1160
+
1161
+ transformer_state_dict = {
1162
+ f"{k.replace('transformer.', '')}": v for k, v in lora_state_dict.items() if k.startswith("transformer.")
1163
+ }
1164
+ transformer_state_dict = convert_unet_state_dict_to_peft(transformer_state_dict)
1165
+ incompatible_keys = set_peft_model_state_dict(transformer_, transformer_state_dict, adapter_name="default")
1166
+ if incompatible_keys is not None:
1167
+ # check only for unexpected keys
1168
+ unexpected_keys = getattr(incompatible_keys, "unexpected_keys", None)
1169
+ if unexpected_keys:
1170
+ logger.warning(
1171
+ f"Loading adapter weights from state_dict led to unexpected keys not found in the model: "
1172
+ f" {unexpected_keys}. "
1173
+ )
1174
+
1175
+ # Make sure the trainable params are in float32. This is again needed since the base models
1176
+ # are in `weight_dtype`. More details:
1177
+ # https://github.com/huggingface/diffusers/pull/6514#discussion_r1449796804
1178
+ if args.mixed_precision == "fp16":
1179
+ # only upcast trainable parameters (LoRA) into fp32
1180
+ cast_training_params([transformer_])
1181
+
1182
+ accelerator.register_save_state_pre_hook(save_model_hook)
1183
+ accelerator.register_load_state_pre_hook(load_model_hook)
1184
+
1185
+ # Enable TF32 for faster training on Ampere GPUs,
1186
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
1187
+ if args.allow_tf32 and torch.cuda.is_available():
1188
+ torch.backends.cuda.matmul.allow_tf32 = True
1189
+
1190
+ if args.scale_lr:
1191
+ args.learning_rate = (
1192
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
1193
+ )
1194
+
1195
+ # Make sure the trainable params are in float32.
1196
+ if args.mixed_precision == "fp16":
1197
+ # only upcast trainable parameters (LoRA) into fp32
1198
+ cast_training_params([transformer], dtype=torch.float32)
1199
+
1200
+ transformer_lora_parameters = list(filter(lambda p: p.requires_grad, transformer.parameters()))
1201
+
1202
+ # Optimization parameters
1203
+ transformer_parameters_with_lr = {"params": transformer_lora_parameters, "lr": args.learning_rate}
1204
+ params_to_optimize = [transformer_parameters_with_lr]
1205
+
1206
+ use_deepspeed_optimizer = (
1207
+ accelerator.state.deepspeed_plugin is not None
1208
+ and "optimizer" in accelerator.state.deepspeed_plugin.deepspeed_config
1209
+ )
1210
+ use_deepspeed_scheduler = (
1211
+ accelerator.state.deepspeed_plugin is not None
1212
+ and "scheduler" in accelerator.state.deepspeed_plugin.deepspeed_config
1213
+ )
1214
+
1215
+ optimizer = get_optimizer(args, params_to_optimize, use_deepspeed=use_deepspeed_optimizer)
1216
+
1217
+ # Dataset and DataLoader
1218
+ train_dataset = VideoDataset(
1219
+ instance_data_root=args.instance_data_root,
1220
+ dataset_name=args.dataset_name,
1221
+ dataset_config_name=args.dataset_config_name,
1222
+ caption_column=args.caption_column,
1223
+ video_column=args.video_column,
1224
+ height=args.height,
1225
+ width=args.width,
1226
+ video_reshape_mode=args.video_reshape_mode,
1227
+ fps=args.fps,
1228
+ max_num_frames=args.max_num_frames,
1229
+ skip_frames_start=args.skip_frames_start,
1230
+ skip_frames_end=args.skip_frames_end,
1231
+ cache_dir=args.cache_dir,
1232
+ id_token=args.id_token,
1233
+ )
1234
+
1235
+ def encode_video(video, bar):
1236
+ bar.update(1)
1237
+ video = video.to(accelerator.device, dtype=vae.dtype).unsqueeze(0)
1238
+ video = video.permute(0, 2, 1, 3, 4) # [B, C, F, H, W]
1239
+ latent_dist = vae.encode(video).latent_dist
1240
+ return latent_dist
1241
+
1242
+ progress_encode_bar = tqdm(
1243
+ range(0, len(train_dataset.instance_videos)),
1244
+ desc="Loading Encode videos",
1245
+ )
1246
+ train_dataset.instance_videos = [
1247
+ encode_video(video, progress_encode_bar) for video in train_dataset.instance_videos
1248
+ ]
1249
+ progress_encode_bar.close()
1250
+
1251
+ def collate_fn(examples):
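+ # `instance_videos` holds cached latent distributions (see `encode_video` above); sampling here
+ # draws fresh latents each time a batch is built, scaled by the VAE scaling factor.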
1252
+ videos = [example["instance_video"].sample() * vae.config.scaling_factor for example in examples]
1253
+ prompts = [example["instance_prompt"] for example in examples]
1254
+
1255
+ videos = torch.cat(videos)
1256
+ videos = videos.permute(0, 2, 1, 3, 4)
1257
+ videos = videos.to(memory_format=torch.contiguous_format).float()
1258
+
1259
+ return {
1260
+ "videos": videos,
1261
+ "prompts": prompts,
1262
+ }
1263
+
1264
+ train_dataloader = DataLoader(
1265
+ train_dataset,
1266
+ batch_size=args.train_batch_size,
1267
+ shuffle=True,
1268
+ collate_fn=collate_fn,
1269
+ num_workers=args.dataloader_num_workers,
1270
+ )
1271
+
1272
+ # Scheduler and math around the number of training steps.
1273
+ overrode_max_train_steps = False
1274
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1275
+ if args.max_train_steps is None:
1276
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1277
+ overrode_max_train_steps = True
1278
+
1279
+ if use_deepspeed_scheduler:
1280
+ from accelerate.utils import DummyScheduler
1281
+
1282
+ lr_scheduler = DummyScheduler(
1283
+ name=args.lr_scheduler,
1284
+ optimizer=optimizer,
1285
+ total_num_steps=args.max_train_steps * accelerator.num_processes,
1286
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1287
+ )
1288
+ else:
1289
+ lr_scheduler = get_scheduler(
1290
+ args.lr_scheduler,
1291
+ optimizer=optimizer,
1292
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
1293
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
1294
+ num_cycles=args.lr_num_cycles,
1295
+ power=args.lr_power,
1296
+ )
1297
+
1298
+ # Prepare everything with our `accelerator`.
1299
+ transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
1300
+ transformer, optimizer, train_dataloader, lr_scheduler
1301
+ )
1302
+
1303
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
1304
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
1305
+ if overrode_max_train_steps:
1306
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
1307
+ # Afterwards we recalculate our number of training epochs
1308
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
1309
+
1310
+ # We need to initialize the trackers we use, and also store our configuration.
1311
+ # The trackers initialize automatically on the main process.
1312
+ if accelerator.is_main_process:
1313
+ tracker_name = args.tracker_name or "cogvideox-lora"
1314
+ accelerator.init_trackers(tracker_name, config=vars(args))
1315
+
1316
+ # Train!
1317
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
1318
+ num_trainable_parameters = sum(param.numel() for model in params_to_optimize for param in model["params"])
1319
+
1320
+ logger.info("***** Running training *****")
1321
+ logger.info(f" Num trainable parameters = {num_trainable_parameters}")
1322
+ logger.info(f" Num examples = {len(train_dataset)}")
1323
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
1324
+ logger.info(f" Num epochs = {args.num_train_epochs}")
1325
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
1326
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
1327
+ logger.info(f" Gradient accumulation steps = {args.gradient_accumulation_steps}")
1328
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
1329
+ global_step = 0
1330
+ first_epoch = 0
1331
+
1332
+ # Potentially load in the weights and states from a previous save
1333
+ if not args.resume_from_checkpoint:
1334
+ initial_global_step = 0
1335
+ else:
1336
+ if args.resume_from_checkpoint != "latest":
1337
+ path = os.path.basename(args.resume_from_checkpoint)
1338
+ else:
1339
+ # Get the most recent checkpoint
1340
+ dirs = os.listdir(args.output_dir)
1341
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
1342
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
1343
+ path = dirs[-1] if len(dirs) > 0 else None
1344
+
1345
+ if path is None:
1346
+ accelerator.print(
1347
+ f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run."
1348
+ )
1349
+ args.resume_from_checkpoint = None
1350
+ initial_global_step = 0
1351
+ else:
1352
+ accelerator.print(f"Resuming from checkpoint {path}")
1353
+ accelerator.load_state(os.path.join(args.output_dir, path))
1354
+ global_step = int(path.split("-")[1])
1355
+
1356
+ initial_global_step = global_step
1357
+ first_epoch = global_step // num_update_steps_per_epoch
1358
+
1359
+ progress_bar = tqdm(
1360
+ range(0, args.max_train_steps),
1361
+ initial=initial_global_step,
1362
+ desc="Steps",
1363
+ # Only show the progress bar once on each machine.
1364
+ disable=not accelerator.is_local_main_process,
1365
+ )
1366
+ vae_scale_factor_spatial = 2 ** (len(vae.config.block_out_channels) - 1)
1367
+
1368
+ # For DeepSpeed training
1369
+ model_config = transformer.module.config if hasattr(transformer, "module") else transformer.config
1370
+
1371
+ for epoch in range(first_epoch, args.num_train_epochs):
1372
+ transformer.train()
1373
+
1374
+ for step, batch in enumerate(train_dataloader):
1375
+ models_to_accumulate = [transformer]
1376
+
1377
+ with accelerator.accumulate(models_to_accumulate):
1378
+ model_input = batch["videos"].to(dtype=weight_dtype) # [B, F, C, H, W]
1379
+ prompts = batch["prompts"]
1380
+
1381
+ # encode prompts
1382
+ prompt_embeds = compute_prompt_embeddings(
1383
+ tokenizer,
1384
+ text_encoder,
1385
+ prompts,
1386
+ model_config.max_text_seq_length,
1387
+ accelerator.device,
1388
+ weight_dtype,
1389
+ requires_grad=False,
1390
+ )
1391
+
1392
+ # Sample noise that will be added to the latents
1393
+ noise = torch.randn_like(model_input)
1394
+ batch_size, num_frames, num_channels, height, width = model_input.shape
1395
+
1396
+ # Sample a random timestep for each image
1397
+ timesteps = torch.randint(
1398
+ 0, scheduler.config.num_train_timesteps, (batch_size,), device=model_input.device
1399
+ )
1400
+ timesteps = timesteps.long()
1401
+
1402
+ # Prepare rotary embeds
1403
+ image_rotary_emb = (
1404
+ prepare_rotary_positional_embeddings(
1405
+ height=args.height,
1406
+ width=args.width,
1407
+ num_frames=num_frames,
1408
+ vae_scale_factor_spatial=vae_scale_factor_spatial,
1409
+ patch_size=model_config.patch_size,
1410
+ attention_head_dim=model_config.attention_head_dim,
1411
+ device=accelerator.device,
1412
+ )
1413
+ if model_config.use_rotary_positional_embeddings
1414
+ else None
1415
+ )
1416
+
1417
+ # Add noise to the model input according to the noise magnitude at each timestep
1418
+ # (this is the forward diffusion process)
1419
+ noisy_model_input = scheduler.add_noise(model_input, noise, timesteps)
1420
+
1421
+ # Predict the noise residual
1422
+ model_output = transformer(
1423
+ hidden_states=noisy_model_input,
1424
+ encoder_hidden_states=prompt_embeds,
1425
+ timestep=timesteps,
1426
+ image_rotary_emb=image_rotary_emb,
1427
+ return_dict=False,
1428
+ )[0]
1429
+ model_pred = scheduler.get_velocity(model_output, noisy_model_input, timesteps)
1430
+
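+ # The transformer output is a velocity (v-prediction); `get_velocity` above converts it into a
+ # prediction of the clean latents, which is compared against `model_input` below with a
+ # per-sample weight of 1 / (1 - alpha_bar_t).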
1431
+ alphas_cumprod = scheduler.alphas_cumprod[timesteps]
1432
+ weights = 1 / (1 - alphas_cumprod)
1433
+ while len(weights.shape) < len(model_pred.shape):
1434
+ weights = weights.unsqueeze(-1)
1435
+
1436
+ target = model_input
1437
+
1438
+ loss = torch.mean((weights * (model_pred - target) ** 2).reshape(batch_size, -1), dim=1)
1439
+ loss = loss.mean()
1440
+ accelerator.backward(loss)
1441
+
1442
+ if accelerator.sync_gradients:
1443
+ params_to_clip = transformer.parameters()
1444
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1445
+
1446
+ if accelerator.state.deepspeed_plugin is None:
1447
+ optimizer.step()
1448
+ optimizer.zero_grad()
1449
+
1450
+ lr_scheduler.step()
1451
+
1452
+ # Checks if the accelerator has performed an optimization step behind the scenes
1453
+ if accelerator.sync_gradients:
1454
+ progress_bar.update(1)
1455
+ global_step += 1
1456
+
1457
+ if accelerator.is_main_process or accelerator.distributed_type == DistributedType.DEEPSPEED:
1458
+ if global_step % args.checkpointing_steps == 0:
1459
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1460
+ if args.checkpoints_total_limit is not None:
1461
+ checkpoints = os.listdir(args.output_dir)
1462
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1463
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1464
+
1465
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1466
+ if len(checkpoints) >= args.checkpoints_total_limit:
1467
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1468
+ removing_checkpoints = checkpoints[0:num_to_remove]
1469
+
1470
+ logger.info(
1471
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1472
+ )
1473
+ logger.info(f"Removing checkpoints: {', '.join(removing_checkpoints)}")
1474
+
1475
+ for removing_checkpoint in removing_checkpoints:
1476
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1477
+ shutil.rmtree(removing_checkpoint)
1478
+
1479
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1480
+ accelerator.save_state(save_path)
1481
+ logger.info(f"Saved state to {save_path}")
1482
+
1483
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1484
+ progress_bar.set_postfix(**logs)
1485
+ accelerator.log(logs, step=global_step)
1486
+
1487
+ if global_step >= args.max_train_steps:
1488
+ break
1489
+
1490
+ if accelerator.is_main_process:
1491
+ if args.validation_prompt is not None and (epoch + 1) % args.validation_epochs == 0:
1492
+ # Create pipeline
1493
+ pipe = CogVideoXPipeline.from_pretrained(
1494
+ args.pretrained_model_name_or_path,
1495
+ transformer=unwrap_model(transformer),
1496
+ text_encoder=unwrap_model(text_encoder),
1497
+ scheduler=scheduler,
1498
+ revision=args.revision,
1499
+ variant=args.variant,
1500
+ torch_dtype=weight_dtype,
1501
+ )
1502
+
1503
+ validation_prompts = args.validation_prompt.split(args.validation_prompt_separator)
1504
+ for validation_prompt in validation_prompts:
1505
+ pipeline_args = {
1506
+ "prompt": validation_prompt,
1507
+ "guidance_scale": args.guidance_scale,
1508
+ "use_dynamic_cfg": args.use_dynamic_cfg,
1509
+ "height": args.height,
1510
+ "width": args.width,
1511
+ }
1512
+
1513
+ validation_outputs = log_validation(
1514
+ pipe=pipe,
1515
+ args=args,
1516
+ accelerator=accelerator,
1517
+ pipeline_args=pipeline_args,
1518
+ epoch=epoch,
1519
+ )
1520
+
1521
+ # Save the lora layers
1522
+ accelerator.wait_for_everyone()
1523
+ if accelerator.is_main_process:
1524
+ transformer = unwrap_model(transformer)
1525
+ dtype = (
1526
+ torch.float16
1527
+ if args.mixed_precision == "fp16"
1528
+ else torch.bfloat16
1529
+ if args.mixed_precision == "bf16"
1530
+ else torch.float32
1531
+ )
1532
+ transformer = transformer.to(dtype)
1533
+ transformer_lora_layers = get_peft_model_state_dict(transformer)
1534
+
1535
+ CogVideoXPipeline.save_lora_weights(
1536
+ save_directory=args.output_dir,
1537
+ transformer_lora_layers=transformer_lora_layers,
1538
+ )
1539
+
1540
+ # Cleanup trained models to save memory
1541
+ del transformer
1542
+ free_memory()
1543
+
1544
+ # Final test inference
1545
+ pipe = CogVideoXPipeline.from_pretrained(
1546
+ args.pretrained_model_name_or_path,
1547
+ revision=args.revision,
1548
+ variant=args.variant,
1549
+ torch_dtype=weight_dtype,
1550
+ )
1551
+ pipe.scheduler = CogVideoXDPMScheduler.from_config(pipe.scheduler.config)
1552
+
1553
+ if args.enable_slicing:
1554
+ pipe.vae.enable_slicing()
1555
+ if args.enable_tiling:
1556
+ pipe.vae.enable_tiling()
1557
+
1558
+ # Load LoRA weights
1559
+ lora_scaling = args.lora_alpha / args.rank
1560
+ pipe.load_lora_weights(args.output_dir, adapter_name="cogvideox-lora")
1561
+ pipe.set_adapters(["cogvideox-lora"], [lora_scaling])
1562
+
1563
+ # Run inference
1564
+ validation_outputs = []
1565
+ if args.validation_prompt and args.num_validation_videos > 0:
1566
+ validation_prompts = args.validation_prompt.split(args.validation_prompt_separator)
1567
+ for validation_prompt in validation_prompts:
1568
+ pipeline_args = {
1569
+ "prompt": validation_prompt,
1570
+ "guidance_scale": args.guidance_scale,
1571
+ "use_dynamic_cfg": args.use_dynamic_cfg,
1572
+ "height": args.height,
1573
+ "width": args.width,
1574
+ }
1575
+
1576
+ video = log_validation(
1577
+ pipe=pipe,
1578
+ args=args,
1579
+ accelerator=accelerator,
1580
+ pipeline_args=pipeline_args,
1581
+ epoch=epoch,
1582
+ is_final_validation=True,
1583
+ )
1584
+ validation_outputs.extend(video)
1585
+
1586
+ if args.push_to_hub:
1587
+ save_model_card(
1588
+ repo_id,
1589
+ videos=validation_outputs,
1590
+ base_model=args.pretrained_model_name_or_path,
1591
+ validation_prompt=args.validation_prompt,
1592
+ repo_folder=args.output_dir,
1593
+ fps=args.fps,
1594
+ )
1595
+ upload_folder(
1596
+ repo_id=repo_id,
1597
+ folder_path=args.output_dir,
1598
+ commit_message="End of training",
1599
+ ignore_patterns=["step_*", "epoch_*"],
1600
+ )
1601
+
1602
+ accelerator.end_training()
1603
+
1604
+
1605
+ if __name__ == "__main__":
1606
+ args = get_args()
1607
+ main(args)
exp_code/1_benchmark/diffusers-WanS2V/examples/cogview4-control/README.md ADDED
@@ -0,0 +1,201 @@
1
+ # Training CogView4 Control
2
+
3
+ This (experimental) example shows how to train Control LoRAs with [CogView4](https://huggingface.co/THUDM/CogView4-6B) by conditioning it on additional structural controls (like depth maps, poses, etc.). We also provide a script for full fine-tuning; refer to [this section](#full-fine-tuning). To learn more about CogView4, refer to the [model page](https://huggingface.co/THUDM/CogView4-6B).
4
+
5
+ To incorporate the additional condition latents, we expand the input features of CogView4 from 64 to 128 channels. The first 64 channels correspond to the original input latents to be denoised, while the latter 64 channels correspond to the control latents. This expansion happens in the `patch_embed` layer, where the combined latents are projected to the feature dimension expected by the rest of the network. Inference is performed with the `CogView4ControlPipeline`.
6
+
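+ As a rough, illustrative sketch of that expansion (this is not the exact diffusers implementation; the layer shapes and variable names below are assumptions), the pretrained projection weights can be copied into a wider layer while the new control columns are zero-initialized, so the expanded model initially ignores the control latents:
+
+ ```py
+ import torch
+ import torch.nn as nn
+
+ hidden_size = 4096  # illustrative value
+ old_proj = nn.Linear(64, hidden_size)   # stands in for the pretrained `patch_embed` projection
+ new_proj = nn.Linear(128, hidden_size)  # widened to also accept the control latents
+
+ with torch.no_grad():
+     new_proj.weight.zero_()
+     new_proj.weight[:, :64] = old_proj.weight  # keep pretrained behaviour for the original channels
+     new_proj.bias.copy_(old_proj.bias)
+ # The extra 64 input columns start at zero, so the control latents only begin to
+ # influence the output once training updates them.
+ ```
+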
7
+ > [!NOTE]
8
+ > **Gated model**
9
+ >
10
+ > As the model is gated, before using it with diffusers you first need to go to the [CogView4 Hugging Face page](https://huggingface.co/THUDM/CogView4-6B), fill in the form and accept the gate. Once you are in, you need to log in so that your system knows you’ve accepted the gate. Use the command below to log in:
11
+
12
+ ```bash
13
+ hf auth login
14
+ ```
15
+
16
+ The example command below shows how to launch fine-tuning for pose conditions. The dataset ([`raulc0399/open_pose_controlnet`](https://huggingface.co/datasets/raulc0399/open_pose_controlnet)) being used here already has the pose conditions of the original images, so we don't have to compute them.
17
+
18
+ ```bash
19
+ accelerate launch train_control_lora_cogview4.py \
20
+ --pretrained_model_name_or_path="THUDM/CogView4-6B" \
21
+ --dataset_name="raulc0399/open_pose_controlnet" \
22
+ --output_dir="pose-control-lora" \
23
+ --mixed_precision="bf16" \
24
+ --train_batch_size=1 \
25
+ --rank=64 \
26
+ --gradient_accumulation_steps=4 \
27
+ --gradient_checkpointing \
28
+ --use_8bit_adam \
29
+ --learning_rate=1e-4 \
30
+ --report_to="wandb" \
31
+ --lr_scheduler="constant" \
32
+ --lr_warmup_steps=0 \
33
+ --max_train_steps=5000 \
34
+ --validation_image="openpose.png" \
35
+ --validation_prompt="A couple, 4k photo, highly detailed" \
36
+ --offload \
37
+ --seed="0" \
38
+ --push_to_hub
39
+ ```
40
+
41
+ `openpose.png` comes from [here](https://huggingface.co/Adapter/t2iadapter/resolve/main/openpose.png).
42
+
43
+ You need to install `diffusers` from the branch in [this PR](https://github.com/huggingface/diffusers/pull/9999). Once it's merged, you should install `diffusers` from `main`.
44
+
45
+ The training script exposes additional CLI args that might be useful to experiment with:
46
+
47
+ * `use_lora_bias`: When set, additionally trains the biases of the `lora_B` layer.
48
+ * `train_norm_layers`: When set, additionally trains the normalization scales. Takes care of saving and loading.
49
+ * `lora_layers`: Specify the layers you want to apply LoRA to. If you specify "all-linear", all the linear layers will be LoRA-attached.
50
+
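+ For instance, building on the LoRA command above, these flags might be combined as follows (illustrative only):
+
+ ```bash
+ accelerate launch train_control_lora_cogview4.py \
+   ... \
+   --use_lora_bias \
+   --train_norm_layers \
+   --lora_layers="all-linear"
+ ```
+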
51
+ ### Training with DeepSpeed
52
+
53
+ It's possible to train with [DeepSpeed](https://github.com/microsoft/DeepSpeed), specifically leveraging its ZeRO Stage 2 optimization. To use it, save the following config to a YAML file (feel free to modify as needed):
54
+
55
+ ```yaml
56
+ compute_environment: LOCAL_MACHINE
57
+ debug: false
58
+ deepspeed_config:
59
+ gradient_accumulation_steps: 1
60
+ gradient_clipping: 1.0
61
+ offload_optimizer_device: cpu
62
+ offload_param_device: cpu
63
+ zero3_init_flag: false
64
+ zero_stage: 2
65
+ distributed_type: DEEPSPEED
66
+ downcast_bf16: 'no'
67
+ enable_cpu_affinity: false
68
+ machine_rank: 0
69
+ main_training_function: main
70
+ mixed_precision: bf16
71
+ num_machines: 1
72
+ num_processes: 1
73
+ rdzv_backend: static
74
+ same_network: true
75
+ tpu_env: []
76
+ tpu_use_cluster: false
77
+ tpu_use_sudo: false
78
+ use_cpu: false
79
+ ```
80
+
81
+ And then while launching training, pass the config file:
82
+
83
+ ```bash
84
+ accelerate launch --config_file=CONFIG_FILE.yaml ...
85
+ ```
86
+
87
+ ### Inference
88
+
89
+ The pose images in our dataset were computed using the [`controlnet_aux`](https://github.com/huggingface/controlnet_aux) library. Let's install it first:
90
+
91
+ ```bash
92
+ pip install controlnet_aux
93
+ ```
94
+
95
+ And then we are ready:
96
+
97
+ ```py
98
+ from controlnet_aux import OpenposeDetector
99
+ from diffusers import CogView4ControlPipeline
100
+ from diffusers.utils import load_image
101
+ from PIL import Image
102
+ import numpy as np
103
+ import torch
104
+
105
+ pipe = CogView4ControlPipeline.from_pretrained("THUDM/CogView4-6B", torch_dtype=torch.bfloat16).to("cuda")
106
+ pipe.load_lora_weights("...") # change this.
107
+
108
+ open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
109
+
110
+ # prepare pose condition.
111
+ url = "https://huggingface.co/Adapter/t2iadapter/resolve/main/people.jpg"
112
+ image = load_image(url)
113
+ image = open_pose(image, detect_resolution=512, image_resolution=1024)
114
+ image = np.array(image)[:, :, ::-1]
115
+ image = Image.fromarray(np.uint8(image))
116
+
117
+ prompt = "A couple, 4k photo, highly detailed"
118
+
119
+ gen_images = pipe(
120
+ prompt=prompt,
121
+ control_image=image,
122
+ num_inference_steps=50,
123
+ joint_attention_kwargs={"scale": 0.9},
124
+ guidance_scale=25.,
125
+ ).images[0]
126
+ gen_images.save("output.png")
127
+ ```
128
+
129
+ ## Full fine-tuning
130
+
131
+ We provide a non-LoRA version of the training script `train_control_cogview4.py`. Here is an example command:
132
+
133
+ ```bash
134
+ accelerate launch --config_file=accelerate_ds2.yaml train_control_cogview4.py \
135
+ --pretrained_model_name_or_path="THUDM/CogView4-6B" \
136
+ --dataset_name="raulc0399/open_pose_controlnet" \
137
+ --output_dir="pose-control" \
138
+ --mixed_precision="bf16" \
139
+ --train_batch_size=2 \
140
+ --dataloader_num_workers=4 \
141
+ --gradient_accumulation_steps=4 \
142
+ --gradient_checkpointing \
143
+ --use_8bit_adam \
144
+ --proportion_empty_prompts=0.2 \
145
+ --learning_rate=5e-5 \
146
+ --adam_weight_decay=1e-4 \
147
+ --report_to="wandb" \
148
+ --lr_scheduler="cosine" \
149
+ --lr_warmup_steps=1000 \
150
+ --checkpointing_steps=1000 \
151
+ --max_train_steps=10000 \
152
+ --validation_steps=200 \
153
+ --validation_image "2_pose_1024.jpg" "3_pose_1024.jpg" \
154
+ --validation_prompt "two friends sitting by each other enjoying a day at the park, full hd, cinematic" "person enjoying a day at the park, full hd, cinematic" \
155
+ --offload \
156
+ --seed="0" \
157
+ --push_to_hub
158
+ ```
159
+
160
+ Change the `validation_image` and `validation_prompt` as needed.
161
+
162
+ For inference, this time, we will run:
163
+
164
+ ```py
165
+ from controlnet_aux import OpenposeDetector
166
+ from diffusers import CogView4ControlPipeline, CogView4Transformer2DModel
167
+ from diffusers.utils import load_image
168
+ from PIL import Image
169
+ import numpy as np
170
+ import torch
171
+
172
+ transformer = CogView4Transformer2DModel.from_pretrained("...") # change this.
173
+ pipe = CogView4ControlPipeline.from_pretrained(
174
+ "THUDM/CogView4-6B", transformer=transformer, torch_dtype=torch.bfloat16
175
+ ).to("cuda")
176
+
177
+ open_pose = OpenposeDetector.from_pretrained("lllyasviel/Annotators")
178
+
179
+ # prepare pose condition.
180
+ url = "https://huggingface.co/Adapter/t2iadapter/resolve/main/people.jpg"
181
+ image = load_image(url)
182
+ image = open_pose(image, detect_resolution=512, image_resolution=1024)
183
+ image = np.array(image)[:, :, ::-1]
184
+ image = Image.fromarray(np.uint8(image))
185
+
186
+ prompt = "A couple, 4k photo, highly detailed"
187
+
188
+ gen_images = pipe(
189
+ prompt=prompt,
190
+ control_image=image,
191
+ num_inference_steps=50,
192
+ guidance_scale=25.,
193
+ ).images[0]
194
+ gen_images.save("output.png")
195
+ ```
196
+
197
+ ## Things to note
198
+
199
+ * The scripts provided in this directory are experimental and educational. This means we may have to tweak things around to get good results on a given condition. We believe this is best done with the community 🤗
200
+ * The scripts are not memory-optimized, but when `--offload` is specified, the VAE and the text encoders are offloaded to CPU while they are not in use.
201
+ * We can extract LoRAs from the fully fine-tuned model. While we currently don't provide any utilities for that, users are welcome to refer to [this script](https://github.com/Stability-AI/stability-ComfyUI-nodes/blob/master/control_lora_create.py), which provides similar functionality.
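+
+ As a rough sketch of that last idea (hypothetical tensors; `rank` is an arbitrary choice), one common approach is a truncated SVD of each targeted layer's weight difference between the fine-tuned and base model:
+
+ ```py
+ import torch
+
+ def extract_lora_from_delta(delta_w: torch.Tensor, rank: int = 64):
+     """Low-rank factorization of a weight difference (fine-tuned minus base)."""
+     u, s, vh = torch.linalg.svd(delta_w.float(), full_matrices=False)
+     lora_b = u[:, :rank] * s[:rank]  # [out_features, rank]
+     lora_a = vh[:rank, :]            # [rank, in_features]
+     return lora_a, lora_b
+
+ # usage: lora_a, lora_b = extract_lora_from_delta(finetuned_weight - base_weight)
+ # lora_b @ lora_a then approximates the weight difference.
+ ```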
exp_code/1_benchmark/diffusers-WanS2V/examples/cogview4-control/requirements.txt ADDED
@@ -0,0 +1,6 @@
1
+ transformers==4.47.0
2
+ wandb
3
+ torch
4
+ torchvision
5
+ accelerate==1.2.0
6
+ peft>=0.14.0
exp_code/1_benchmark/diffusers-WanS2V/examples/cogview4-control/train_control_cogview4.py ADDED
@@ -0,0 +1,1243 @@
1
+ #!/usr/bin/env python
2
+ # coding=utf-8
3
+ # Copyright 2025 The HuggingFace Inc. team. All rights reserved.
4
+ #
5
+ # Licensed under the Apache License, Version 2.0 (the "License");
6
+ # you may not use this file except in compliance with the License.
7
+ # You may obtain a copy of the License at
8
+ #
9
+ # http://www.apache.org/licenses/LICENSE-2.0
10
+ #
11
+ # Unless required by applicable law or agreed to in writing, software
12
+ # distributed under the License is distributed on an "AS IS" BASIS,
13
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
14
+ # See the License for the specific language governing permissions and
15
+ # limitations under the License.
16
+
17
+ import argparse
18
+ import copy
19
+ import logging
20
+ import math
21
+ import os
22
+ import random
23
+ import shutil
24
+ from contextlib import nullcontext
25
+ from pathlib import Path
26
+
27
+ import accelerate
28
+ import numpy as np
29
+ import torch
30
+ import transformers
31
+ from accelerate import Accelerator
32
+ from accelerate.logging import get_logger
33
+ from accelerate.utils import DistributedType, ProjectConfiguration, set_seed
34
+ from datasets import load_dataset
35
+ from huggingface_hub import create_repo, upload_folder
36
+ from packaging import version
37
+ from PIL import Image
38
+ from torchvision import transforms
39
+ from tqdm.auto import tqdm
40
+
41
+ import diffusers
42
+ from diffusers import (
43
+ AutoencoderKL,
44
+ CogView4ControlPipeline,
45
+ CogView4Transformer2DModel,
46
+ FlowMatchEulerDiscreteScheduler,
47
+ )
48
+ from diffusers.optimization import get_scheduler
49
+ from diffusers.training_utils import (
50
+ compute_density_for_timestep_sampling,
51
+ compute_loss_weighting_for_sd3,
52
+ free_memory,
53
+ )
54
+ from diffusers.utils import check_min_version, is_wandb_available, load_image, make_image_grid
55
+ from diffusers.utils.hub_utils import load_or_create_model_card, populate_model_card
56
+ from diffusers.utils.torch_utils import is_compiled_module
57
+
58
+
59
+ if is_wandb_available():
60
+ import wandb
61
+
62
+ # Will error if the minimal version of diffusers is not installed. Remove at your own risks.
63
+ check_min_version("0.36.0.dev0")
64
+
65
+ logger = get_logger(__name__)
66
+
67
+ NORM_LAYER_PREFIXES = ["norm_q", "norm_k", "norm_added_q", "norm_added_k"]
68
+
69
+
70
+ def encode_images(pixels: torch.Tensor, vae: torch.nn.Module, weight_dtype):
71
+ pixel_latents = vae.encode(pixels.to(vae.dtype)).latent_dist.sample()
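+ # Shift and scale the sampled latents with the VAE's configured factors so they match the
+ # latent distribution the transformer was trained on.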
72
+ pixel_latents = (pixel_latents - vae.config.shift_factor) * vae.config.scaling_factor
73
+ return pixel_latents.to(weight_dtype)
74
+
75
+
76
+ def log_validation(cogview4_transformer, args, accelerator, weight_dtype, step, is_final_validation=False):
77
+ logger.info("Running validation... ")
78
+
79
+ if not is_final_validation:
80
+ cogview4_transformer = accelerator.unwrap_model(cogview4_transformer)
81
+ pipeline = CogView4ControlPipeline.from_pretrained(
82
+ args.pretrained_model_name_or_path,
83
+ transformer=cogview4_transformer,
84
+ torch_dtype=weight_dtype,
85
+ )
86
+ else:
87
+ transformer = CogView4Transformer2DModel.from_pretrained(args.output_dir, torch_dtype=weight_dtype)
88
+ pipeline = CogView4ControlPipeline.from_pretrained(
89
+ args.pretrained_model_name_or_path,
90
+ transformer=transformer,
91
+ torch_dtype=weight_dtype,
92
+ )
93
+
94
+ pipeline.to(accelerator.device)
95
+ pipeline.set_progress_bar_config(disable=True)
96
+
97
+ if args.seed is None:
98
+ generator = None
99
+ else:
100
+ generator = torch.Generator(device=accelerator.device).manual_seed(args.seed)
101
+
102
+ if len(args.validation_image) == len(args.validation_prompt):
103
+ validation_images = args.validation_image
104
+ validation_prompts = args.validation_prompt
105
+ elif len(args.validation_image) == 1:
106
+ validation_images = args.validation_image * len(args.validation_prompt)
107
+ validation_prompts = args.validation_prompt
108
+ elif len(args.validation_prompt) == 1:
109
+ validation_images = args.validation_image
110
+ validation_prompts = args.validation_prompt * len(args.validation_image)
111
+ else:
112
+ raise ValueError(
113
+ "number of `args.validation_image` and `args.validation_prompt` should be checked in `parse_args`"
114
+ )
115
+
116
+ image_logs = []
117
+ if is_final_validation or torch.backends.mps.is_available():
118
+ autocast_ctx = nullcontext()
119
+ else:
120
+ autocast_ctx = torch.autocast(accelerator.device.type, weight_dtype)
121
+
122
+ for validation_prompt, validation_image in zip(validation_prompts, validation_images):
123
+ validation_image = load_image(validation_image)
124
+ # inference at 1024 resolution may be needed to get a good image
125
+ validation_image = validation_image.resize((args.resolution, args.resolution))
126
+
127
+ images = []
128
+
129
+ for _ in range(args.num_validation_images):
130
+ with autocast_ctx:
131
+ image = pipeline(
132
+ prompt=validation_prompt,
133
+ control_image=validation_image,
134
+ num_inference_steps=50,
135
+ guidance_scale=args.guidance_scale,
136
+ max_sequence_length=args.max_sequence_length,
137
+ generator=generator,
138
+ height=args.resolution,
139
+ width=args.resolution,
140
+ ).images[0]
141
+ image = image.resize((args.resolution, args.resolution))
142
+ images.append(image)
143
+ image_logs.append(
144
+ {"validation_image": validation_image, "images": images, "validation_prompt": validation_prompt}
145
+ )
146
+
147
+ tracker_key = "test" if is_final_validation else "validation"
148
+ for tracker in accelerator.trackers:
149
+ if tracker.name == "tensorboard":
150
+ for log in image_logs:
151
+ images = log["images"]
152
+ validation_prompt = log["validation_prompt"]
153
+ validation_image = log["validation_image"]
154
+ formatted_images = []
155
+ formatted_images.append(np.asarray(validation_image))
156
+ for image in images:
157
+ formatted_images.append(np.asarray(image))
158
+ formatted_images = np.stack(formatted_images)
159
+ tracker.writer.add_images(validation_prompt, formatted_images, step, dataformats="NHWC")
160
+
161
+ elif tracker.name == "wandb":
162
+ formatted_images = []
163
+ for log in image_logs:
164
+ images = log["images"]
165
+ validation_prompt = log["validation_prompt"]
166
+ validation_image = log["validation_image"]
167
+ formatted_images.append(wandb.Image(validation_image, caption="Conditioning"))
168
+ for image in images:
169
+ image = wandb.Image(image, caption=validation_prompt)
170
+ formatted_images.append(image)
171
+
172
+ tracker.log({tracker_key: formatted_images})
173
+ else:
174
+ logger.warning(f"image logging not implemented for {tracker.name}")
175
+
176
+ del pipeline
177
+ free_memory()
178
+ return image_logs
179
+
180
+
181
+ def save_model_card(repo_id: str, image_logs=None, base_model=str, repo_folder=None):
182
+ img_str = ""
183
+ if image_logs is not None:
184
+ img_str = "You can find some example images below.\n\n"
185
+ for i, log in enumerate(image_logs):
186
+ images = log["images"]
187
+ validation_prompt = log["validation_prompt"]
188
+ validation_image = log["validation_image"]
189
+ validation_image.save(os.path.join(repo_folder, "image_control.png"))
190
+ img_str += f"prompt: {validation_prompt}\n"
191
+ images = [validation_image] + images
192
+ make_image_grid(images, 1, len(images)).save(os.path.join(repo_folder, f"images_{i}.png"))
193
+ img_str += f"![images_{i})](./images_{i}.png)\n"
194
+
195
+ model_description = f"""
196
+ # cogview4-control-{repo_id}
197
+
198
+ These are Control weights trained on {base_model} with new type of conditioning.
199
+ {img_str}
200
+
201
+ ## License
202
+
203
+ Please adhere to the licensing terms as described [here](https://huggingface.co/THUDM/CogView4-6b/blob/main/LICENSE.md)
204
+ """
205
+
206
+ model_card = load_or_create_model_card(
207
+ repo_id_or_path=repo_id,
208
+ from_training=True,
209
+ license="other",
210
+ base_model=base_model,
211
+ model_description=model_description,
212
+ inference=True,
213
+ )
214
+
215
+ tags = [
216
+ "cogview4",
217
+ "cogview4-diffusers",
218
+ "text-to-image",
219
+ "diffusers",
220
+ "control",
221
+ "diffusers-training",
222
+ ]
223
+ model_card = populate_model_card(model_card, tags=tags)
224
+
225
+ model_card.save(os.path.join(repo_folder, "README.md"))
226
+
227
+
228
+ def parse_args(input_args=None):
229
+ parser = argparse.ArgumentParser(description="Simple example of a CogView4 Control training script.")
230
+ parser.add_argument(
231
+ "--pretrained_model_name_or_path",
232
+ type=str,
233
+ default=None,
234
+ required=True,
235
+ help="Path to pretrained model or model identifier from huggingface.co/models.",
236
+ )
237
+ parser.add_argument(
238
+ "--variant",
239
+ type=str,
240
+ default=None,
241
+ help="Variant of the model files of the pretrained model identifier from huggingface.co/models, 'e.g.' fp16",
242
+ )
243
+ parser.add_argument(
244
+ "--revision",
245
+ type=str,
246
+ default=None,
247
+ required=False,
248
+ help="Revision of pretrained model identifier from huggingface.co/models.",
249
+ )
250
+ parser.add_argument(
251
+ "--output_dir",
252
+ type=str,
253
+ default="cogview4-control",
254
+ help="The output directory where the model predictions and checkpoints will be written.",
255
+ )
256
+ parser.add_argument(
257
+ "--cache_dir",
258
+ type=str,
259
+ default=None,
260
+ help="The directory where the downloaded models and datasets will be stored.",
261
+ )
262
+ parser.add_argument("--seed", type=int, default=None, help="A seed for reproducible training.")
263
+ parser.add_argument(
264
+ "--resolution",
265
+ type=int,
266
+ default=1024,
267
+ help=(
268
+ "The resolution for input images, all the images in the train/validation dataset will be resized to this"
269
+ " resolution"
270
+ ),
271
+ )
272
+ parser.add_argument(
273
+ "--max_sequence_length", type=int, default=128, help="The maximum sequence length for the prompt."
274
+ )
275
+ parser.add_argument(
276
+ "--train_batch_size", type=int, default=4, help="Batch size (per device) for the training dataloader."
277
+ )
278
+ parser.add_argument("--num_train_epochs", type=int, default=1)
279
+ parser.add_argument(
280
+ "--max_train_steps",
281
+ type=int,
282
+ default=None,
283
+ help="Total number of training steps to perform. If provided, overrides num_train_epochs.",
284
+ )
285
+ parser.add_argument(
286
+ "--checkpointing_steps",
287
+ type=int,
288
+ default=500,
289
+ help=(
290
+ "Save a checkpoint of the training state every X updates. Checkpoints can be used for resuming training via `--resume_from_checkpoint`. "
291
+ "In the case that the checkpoint is better than the final trained model, the checkpoint can also be used for inference."
292
+ "Using a checkpoint for inference requires separate loading of the original pipeline and the individual checkpointed model components."
293
+ "See https://huggingface.co/docs/diffusers/main/en/training/dreambooth#performing-inference-using-a-saved-checkpoint for step by step"
294
+ "instructions."
295
+ ),
296
+ )
297
+ parser.add_argument(
298
+ "--checkpoints_total_limit",
299
+ type=int,
300
+ default=None,
301
+ help=("Max number of checkpoints to store."),
302
+ )
303
+ parser.add_argument(
304
+ "--resume_from_checkpoint",
305
+ type=str,
306
+ default=None,
307
+ help=(
308
+ "Whether training should be resumed from a previous checkpoint. Use a path saved by"
309
+ ' `--checkpointing_steps`, or `"latest"` to automatically select the last available checkpoint.'
310
+ ),
311
+ )
312
+ parser.add_argument(
313
+ "--proportion_empty_prompts",
314
+ type=float,
315
+ default=0,
316
+ help="Proportion of image prompts to be replaced with empty strings. Defaults to 0 (no prompt replacement).",
317
+ )
318
+ parser.add_argument(
319
+ "--gradient_accumulation_steps",
320
+ type=int,
321
+ default=1,
322
+ help="Number of updates steps to accumulate before performing a backward/update pass.",
323
+ )
324
+ parser.add_argument(
325
+ "--gradient_checkpointing",
326
+ action="store_true",
327
+ help="Whether or not to use gradient checkpointing to save memory at the expense of slower backward pass.",
328
+ )
329
+ parser.add_argument(
330
+ "--learning_rate",
331
+ type=float,
332
+ default=5e-6,
333
+ help="Initial learning rate (after the potential warmup period) to use.",
334
+ )
335
+ parser.add_argument(
336
+ "--scale_lr",
337
+ action="store_true",
338
+ default=False,
339
+ help="Scale the learning rate by the number of GPUs, gradient accumulation steps, and batch size.",
340
+ )
341
+ parser.add_argument(
342
+ "--lr_scheduler",
343
+ type=str,
344
+ default="constant",
345
+ help=(
346
+ 'The scheduler type to use. Choose between ["linear", "cosine", "cosine_with_restarts", "polynomial",'
347
+ ' "constant", "constant_with_warmup"]'
348
+ ),
349
+ )
350
+ parser.add_argument(
351
+ "--lr_warmup_steps", type=int, default=500, help="Number of steps for the warmup in the lr scheduler."
352
+ )
353
+ parser.add_argument(
354
+ "--lr_num_cycles",
355
+ type=int,
356
+ default=1,
357
+ help="Number of hard resets of the lr in cosine_with_restarts scheduler.",
358
+ )
359
+ parser.add_argument("--lr_power", type=float, default=1.0, help="Power factor of the polynomial scheduler.")
360
+ parser.add_argument(
361
+ "--use_8bit_adam", action="store_true", help="Whether or not to use 8-bit Adam from bitsandbytes."
362
+ )
363
+
364
+ parser.add_argument(
365
+ "--dataloader_num_workers",
366
+ type=int,
367
+ default=0,
368
+ help=(
369
+ "Number of subprocesses to use for data loading. 0 means that the data will be loaded in the main process."
370
+ ),
371
+ )
372
+ parser.add_argument("--adam_beta1", type=float, default=0.9, help="The beta1 parameter for the Adam optimizer.")
373
+ parser.add_argument("--adam_beta2", type=float, default=0.999, help="The beta2 parameter for the Adam optimizer.")
374
+ parser.add_argument("--adam_weight_decay", type=float, default=1e-2, help="Weight decay to use.")
375
+ parser.add_argument("--adam_epsilon", type=float, default=1e-08, help="Epsilon value for the Adam optimizer")
376
+ parser.add_argument("--max_grad_norm", default=1.0, type=float, help="Max gradient norm.")
377
+ parser.add_argument("--push_to_hub", action="store_true", help="Whether or not to push the model to the Hub.")
378
+ parser.add_argument("--hub_token", type=str, default=None, help="The token to use to push to the Model Hub.")
379
+ parser.add_argument(
380
+ "--hub_model_id",
381
+ type=str,
382
+ default=None,
383
+ help="The name of the repository to keep in sync with the local `output_dir`.",
384
+ )
385
+ parser.add_argument(
386
+ "--logging_dir",
387
+ type=str,
388
+ default="logs",
389
+ help=(
390
+ "[TensorBoard](https://www.tensorflow.org/tensorboard) log directory. Will default to"
391
+ " *output_dir/runs/**CURRENT_DATETIME_HOSTNAME***."
392
+ ),
393
+ )
394
+ parser.add_argument(
395
+ "--allow_tf32",
396
+ action="store_true",
397
+ help=(
398
+ "Whether or not to allow TF32 on Ampere GPUs. Can be used to speed up training. For more information, see"
399
+ " https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices"
400
+ ),
401
+ )
402
+ parser.add_argument(
403
+ "--report_to",
404
+ type=str,
405
+ default="tensorboard",
406
+ help=(
407
+ 'The integration to report the results and logs to. Supported platforms are `"tensorboard"`'
408
+ ' (default), `"wandb"` and `"comet_ml"`. Use `"all"` to report to all integrations.'
409
+ ),
410
+ )
411
+ parser.add_argument(
412
+ "--mixed_precision",
413
+ type=str,
414
+ default=None,
415
+ choices=["no", "fp16", "bf16"],
416
+ help=(
417
+ "Whether to use mixed precision. Choose between fp16 and bf16 (bfloat16). Bf16 requires PyTorch >="
418
+ " 1.10.and an Nvidia Ampere GPU. Default to the value of accelerate config of the current system or the"
419
+ " flag passed with the `accelerate.launch` command. Use this argument to override the accelerate config."
420
+ ),
421
+ )
422
+ parser.add_argument(
423
+ "--dataset_name",
424
+ type=str,
425
+ default=None,
426
+ help=(
427
+ "The name of the Dataset (from the HuggingFace hub) to train on (could be your own, possibly private,"
428
+ " dataset). It can also be a path pointing to a local copy of a dataset in your filesystem,"
429
+ " or to a folder containing files that 🤗 Datasets can understand."
430
+ ),
431
+ )
432
+ parser.add_argument(
433
+ "--dataset_config_name",
434
+ type=str,
435
+ default=None,
436
+ help="The config of the Dataset, leave as None if there's only one config.",
437
+ )
438
+ parser.add_argument(
439
+ "--image_column", type=str, default="image", help="The column of the dataset containing the target image."
440
+ )
441
+ parser.add_argument(
442
+ "--conditioning_image_column",
443
+ type=str,
444
+ default="conditioning_image",
445
+ help="The column of the dataset containing the control conditioning image.",
446
+ )
447
+ parser.add_argument(
448
+ "--caption_column",
449
+ type=str,
450
+ default="text",
451
+ help="The column of the dataset containing a caption or a list of captions.",
452
+ )
453
+ parser.add_argument("--log_dataset_samples", action="store_true", help="Whether to log somple dataset samples.")
454
+ parser.add_argument(
455
+ "--max_train_samples",
456
+ type=int,
457
+ default=None,
458
+ help=(
459
+ "For debugging purposes or quicker training, truncate the number of training examples to this "
460
+ "value if set."
461
+ ),
462
+ )
463
+ parser.add_argument(
464
+ "--validation_prompt",
465
+ type=str,
466
+ default=None,
467
+ nargs="+",
468
+ help=(
469
+ "A set of prompts evaluated every `--validation_steps` and logged to `--report_to`."
470
+ " Provide either a matching number of `--validation_image`s, a single `--validation_image`"
471
+ " to be used with all prompts, or a single prompt that will be used with all `--validation_image`s."
472
+ ),
473
+ )
474
+ parser.add_argument(
475
+ "--validation_image",
476
+ type=str,
477
+ default=None,
478
+ nargs="+",
479
+ help=(
480
+ "A set of paths to the control conditioning image be evaluated every `--validation_steps`"
481
+ " and logged to `--report_to`. Provide either a matching number of `--validation_prompt`s, a"
482
+ " a single `--validation_prompt` to be used with all `--validation_image`s, or a single"
483
+ " `--validation_image` that will be used with all `--validation_prompt`s."
484
+ ),
485
+ )
486
+ parser.add_argument(
487
+ "--num_validation_images",
488
+ type=int,
489
+ default=1,
490
+ help="Number of images to be generated for each `--validation_image`, `--validation_prompt` pair",
491
+ )
492
+ parser.add_argument(
493
+ "--validation_steps",
494
+ type=int,
495
+ default=100,
496
+ help=(
497
+ "Run validation every X steps. Validation consists of running the prompt"
498
+ " `args.validation_prompt` multiple times: `args.num_validation_images`"
499
+ " and logging the images."
500
+ ),
501
+ )
502
+ parser.add_argument(
503
+ "--tracker_project_name",
504
+ type=str,
505
+ default="cogview4_train_control",
506
+ help=(
507
+ "The `project_name` argument passed to Accelerator.init_trackers for"
508
+ " more information see https://huggingface.co/docs/accelerate/v0.17.0/en/package_reference/accelerator#accelerate.Accelerator"
509
+ ),
510
+ )
511
+ parser.add_argument(
512
+ "--jsonl_for_train",
513
+ type=str,
514
+ default=None,
515
+ help="Path to the jsonl file containing the training data.",
516
+ )
517
+ parser.add_argument(
518
+ "--only_target_transformer_blocks",
519
+ action="store_true",
520
+ help="If we should only target the transformer blocks to train along with the input layer (`x_embedder`).",
521
+ )
522
+ parser.add_argument(
523
+ "--guidance_scale",
524
+ type=float,
525
+ default=3.5,
526
+ help="the guidance scale used for transformer.",
527
+ )
528
+
529
+ parser.add_argument(
530
+ "--upcast_before_saving",
531
+ action="store_true",
532
+ help=(
533
+ "Whether to upcast the trained transformer layers to float32 before saving (at the end of training). "
534
+ "Defaults to precision dtype used for training to save memory"
535
+ ),
536
+ )
537
+
538
+ parser.add_argument(
539
+ "--weighting_scheme",
540
+ type=str,
541
+ default="none",
542
+ choices=["sigma_sqrt", "logit_normal", "mode", "cosmap", "none"],
543
+ help=('We default to the "none" weighting scheme for uniform sampling and uniform loss'),
544
+ )
545
+ parser.add_argument(
546
+ "--logit_mean", type=float, default=0.0, help="mean to use when using the `'logit_normal'` weighting scheme."
547
+ )
548
+ parser.add_argument(
549
+ "--logit_std", type=float, default=1.0, help="std to use when using the `'logit_normal'` weighting scheme."
550
+ )
551
+ parser.add_argument(
552
+ "--mode_scale",
553
+ type=float,
554
+ default=1.29,
555
+ help="Scale of mode weighting scheme. Only effective when using the `'mode'` as the `weighting_scheme`.",
556
+ )
557
+ parser.add_argument(
558
+ "--offload",
559
+ action="store_true",
560
+ help="Whether to offload the VAE and the text encoders to CPU when they are not used.",
561
+ )
562
+
563
+ if input_args is not None:
564
+ args = parser.parse_args(input_args)
565
+ else:
566
+ args = parser.parse_args()
567
+
568
+ if args.dataset_name is None and args.jsonl_for_train is None:
569
+ raise ValueError("Specify either `--dataset_name` or `--jsonl_for_train`")
570
+
571
+ if args.dataset_name is not None and args.jsonl_for_train is not None:
572
+ raise ValueError("Specify only one of `--dataset_name` or `--jsonl_for_train`")
573
+
574
+ if args.proportion_empty_prompts < 0 or args.proportion_empty_prompts > 1:
575
+ raise ValueError("`--proportion_empty_prompts` must be in the range [0, 1].")
576
+
577
+ if args.validation_prompt is not None and args.validation_image is None:
578
+ raise ValueError("`--validation_image` must be set if `--validation_prompt` is set")
579
+
580
+ if args.validation_prompt is None and args.validation_image is not None:
581
+ raise ValueError("`--validation_prompt` must be set if `--validation_image` is set")
582
+
583
+ if (
584
+ args.validation_image is not None
585
+ and args.validation_prompt is not None
586
+ and len(args.validation_image) != 1
587
+ and len(args.validation_prompt) != 1
588
+ and len(args.validation_image) != len(args.validation_prompt)
589
+ ):
590
+ raise ValueError(
591
+ "Must provide either 1 `--validation_image`, 1 `--validation_prompt`,"
592
+ " or the same number of `--validation_prompt`s and `--validation_image`s"
593
+ )
594
+
595
+ if args.resolution % 8 != 0:
596
+ raise ValueError(
597
+ "`--resolution` must be divisible by 8 for consistently sized encoded images between the VAE and the cogview4 transformer."
598
+ )
599
+
600
+ return args
601
+
602
+
603
+ def get_train_dataset(args, accelerator):
604
+ dataset = None
605
+ if args.dataset_name is not None:
606
+ # Downloading and loading a dataset from the hub.
607
+ dataset = load_dataset(
608
+ args.dataset_name,
609
+ args.dataset_config_name,
610
+ cache_dir=args.cache_dir,
611
+ )
612
+ if args.jsonl_for_train is not None:
613
+ # load from json
614
+ dataset = load_dataset("json", data_files=args.jsonl_for_train, cache_dir=args.cache_dir)
615
+ dataset = dataset.flatten_indices()
616
+ # Preprocessing the datasets.
617
+ # We need to tokenize inputs and targets.
618
+ column_names = dataset["train"].column_names
619
+
620
+ # 6. Get the column names for input/target.
621
+ if args.image_column is None:
622
+ image_column = column_names[0]
623
+ logger.info(f"image column defaulting to {image_column}")
624
+ else:
625
+ image_column = args.image_column
626
+ if image_column not in column_names:
627
+ raise ValueError(
628
+ f"`--image_column` value '{args.image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
629
+ )
630
+
631
+ if args.caption_column is None:
632
+ caption_column = column_names[1]
633
+ logger.info(f"caption column defaulting to {caption_column}")
634
+ else:
635
+ caption_column = args.caption_column
636
+ if caption_column not in column_names:
637
+ raise ValueError(
638
+ f"`--caption_column` value '{args.caption_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
639
+ )
640
+
641
+ if args.conditioning_image_column is None:
642
+ conditioning_image_column = column_names[2]
643
+ logger.info(f"conditioning image column defaulting to {conditioning_image_column}")
644
+ else:
645
+ conditioning_image_column = args.conditioning_image_column
646
+ if conditioning_image_column not in column_names:
647
+ raise ValueError(
648
+ f"`--conditioning_image_column` value '{args.conditioning_image_column}' not found in dataset columns. Dataset columns are: {', '.join(column_names)}"
649
+ )
650
+
651
+ with accelerator.main_process_first():
652
+ train_dataset = dataset["train"].shuffle(seed=args.seed)
653
+ if args.max_train_samples is not None:
654
+ train_dataset = train_dataset.select(range(args.max_train_samples))
655
+ return train_dataset
656
+
657
+
658
+ def prepare_train_dataset(dataset, accelerator):
659
+ image_transforms = transforms.Compose(
660
+ [
661
+ transforms.Resize((args.resolution, args.resolution), interpolation=transforms.InterpolationMode.BILINEAR),
662
+ transforms.ToTensor(),
663
+ transforms.Lambda(lambda x: x * 2 - 1),
664
+ ]
665
+ )
666
+
667
+ def preprocess_train(examples):
668
+ images = [
669
+ (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB"))
670
+ for image in examples[args.image_column]
671
+ ]
672
+ images = [image_transforms(image) for image in images]
673
+
674
+ conditioning_images = [
675
+ (image.convert("RGB") if not isinstance(image, str) else Image.open(image).convert("RGB"))
676
+ for image in examples[args.conditioning_image_column]
677
+ ]
678
+ conditioning_images = [image_transforms(image) for image in conditioning_images]
679
+ examples["pixel_values"] = images
680
+ examples["conditioning_pixel_values"] = conditioning_images
681
+
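+ # Each sample may provide either a single caption or a list of captions; when it is a list,
+ # the longest caption is kept as the training prompt.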
682
+ is_caption_list = isinstance(examples[args.caption_column][0], list)
683
+ if is_caption_list:
684
+ examples["captions"] = [max(example, key=len) for example in examples[args.caption_column]]
685
+ else:
686
+ examples["captions"] = list(examples[args.caption_column])
687
+
688
+ return examples
689
+
690
+ with accelerator.main_process_first():
691
+ dataset = dataset.with_transform(preprocess_train)
692
+
693
+ return dataset
694
+
695
+
696
+ def collate_fn(examples):
697
+ pixel_values = torch.stack([example["pixel_values"] for example in examples])
698
+ pixel_values = pixel_values.to(memory_format=torch.contiguous_format).float()
699
+ conditioning_pixel_values = torch.stack([example["conditioning_pixel_values"] for example in examples])
700
+ conditioning_pixel_values = conditioning_pixel_values.to(memory_format=torch.contiguous_format).float()
701
+ captions = [example["captions"] for example in examples]
702
+ return {"pixel_values": pixel_values, "conditioning_pixel_values": conditioning_pixel_values, "captions": captions}
703
+
704
+
705
+ def main(args):
706
+ if args.report_to == "wandb" and args.hub_token is not None:
707
+ raise ValueError(
708
+ "You cannot use both --report_to=wandb and --hub_token due to a security risk of exposing your token."
709
+ " Please use `hf auth login` to authenticate with the Hub."
710
+ )
711
+
712
+ logging_out_dir = Path(args.output_dir, args.logging_dir)
713
+
714
+ if torch.backends.mps.is_available() and args.mixed_precision == "bf16":
715
+ # due to pytorch#99272, MPS does not yet support bfloat16.
716
+ raise ValueError(
717
+ "Mixed precision training with bfloat16 is not supported on MPS. Please use fp16 (recommended) or fp32 instead."
718
+ )
719
+
720
+ accelerator_project_config = ProjectConfiguration(project_dir=args.output_dir, logging_dir=str(logging_out_dir))
721
+
722
+ accelerator = Accelerator(
723
+ gradient_accumulation_steps=args.gradient_accumulation_steps,
724
+ mixed_precision=args.mixed_precision,
725
+ log_with=args.report_to,
726
+ project_config=accelerator_project_config,
727
+ )
728
+
729
+ # Disable AMP for MPS, Apple's framework for accelerating machine learning computations on macOS and iOS devices.
730
+ if torch.backends.mps.is_available():
731
+ logger.info("MPS is enabled. Disabling AMP.")
732
+ accelerator.native_amp = False
733
+
734
+ # Make one log on every process with the configuration for debugging.
735
+ logging.basicConfig(
736
+ format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
737
+ datefmt="%m/%d/%Y %H:%M:%S",
738
+ # DEBUG, INFO, WARNING, ERROR, CRITICAL
739
+ level=logging.INFO,
740
+ )
741
+ logger.info(accelerator.state, main_process_only=False)
742
+
743
+ if accelerator.is_local_main_process:
744
+ transformers.utils.logging.set_verbosity_warning()
745
+ diffusers.utils.logging.set_verbosity_info()
746
+ else:
747
+ transformers.utils.logging.set_verbosity_error()
748
+ diffusers.utils.logging.set_verbosity_error()
749
+
750
+ # If passed along, set the training seed now.
751
+ if args.seed is not None:
752
+ set_seed(args.seed)
753
+
754
+ # Handle the repository creation
755
+ if accelerator.is_main_process:
756
+ if args.output_dir is not None:
757
+ os.makedirs(args.output_dir, exist_ok=True)
758
+
759
+ if args.push_to_hub:
760
+ repo_id = create_repo(
761
+ repo_id=args.hub_model_id or Path(args.output_dir).name, exist_ok=True, token=args.hub_token
762
+ ).repo_id
763
+
764
+ # Load models. We will load the text encoders later in a pipeline to compute
765
+ # embeddings.
766
+ vae = AutoencoderKL.from_pretrained(
767
+ args.pretrained_model_name_or_path,
768
+ subfolder="vae",
769
+ revision=args.revision,
770
+ variant=args.variant,
771
+ )
772
+ cogview4_transformer = CogView4Transformer2DModel.from_pretrained(
773
+ args.pretrained_model_name_or_path,
774
+ subfolder="transformer",
775
+ revision=args.revision,
776
+ variant=args.variant,
777
+ )
778
+ logger.info("All models loaded successfully")
779
+
780
+ noise_scheduler = FlowMatchEulerDiscreteScheduler.from_pretrained(
781
+ args.pretrained_model_name_or_path,
782
+ subfolder="scheduler",
783
+ )
784
+ noise_scheduler_copy = copy.deepcopy(noise_scheduler)
785
+ if not args.only_target_transformer_blocks:
786
+ cogview4_transformer.requires_grad_(True)
787
+ vae.requires_grad_(False)
788
+
789
+ # cast down and move to the CPU
790
+ weight_dtype = torch.float32
791
+ if accelerator.mixed_precision == "fp16":
792
+ weight_dtype = torch.float16
793
+ elif accelerator.mixed_precision == "bf16":
794
+ weight_dtype = torch.bfloat16
795
+
796
+ # let's not move the VAE to the GPU yet.
797
+ vae.to(dtype=torch.float32) # keep the VAE in float32.
798
+
799
+ # enable image inputs
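+ # The patch-embed input projection is widened to twice the channels so that noisy image
+ # latents and control latents can be concatenated along the channel dimension. The new
+ # weight columns are zero-initialized, so the control branch contributes nothing at the
+ # start of training and the model initially behaves like the pretrained base transformer.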
800
+ with torch.no_grad():
801
+ patch_size = cogview4_transformer.config.patch_size
802
+ initial_input_channels = cogview4_transformer.config.in_channels * patch_size**2
803
+ new_linear = torch.nn.Linear(
804
+ cogview4_transformer.patch_embed.proj.in_features * 2,
805
+ cogview4_transformer.patch_embed.proj.out_features,
806
+ bias=cogview4_transformer.patch_embed.proj.bias is not None,
807
+ dtype=cogview4_transformer.dtype,
808
+ device=cogview4_transformer.device,
809
+ )
810
+ new_linear.weight.zero_()
811
+ new_linear.weight[:, :initial_input_channels].copy_(cogview4_transformer.patch_embed.proj.weight)
812
+ if cogview4_transformer.patch_embed.proj.bias is not None:
813
+ new_linear.bias.copy_(cogview4_transformer.patch_embed.proj.bias)
814
+ cogview4_transformer.patch_embed.proj = new_linear
815
+
816
+ assert torch.all(cogview4_transformer.patch_embed.proj.weight[:, initial_input_channels:].data == 0)
817
+ cogview4_transformer.register_to_config(
818
+ in_channels=cogview4_transformer.config.in_channels * 2, out_channels=cogview4_transformer.config.in_channels
819
+ )
820
+
821
+ if args.only_target_transformer_blocks:
822
+ cogview4_transformer.patch_embed.proj.requires_grad_(True)
823
+ for name, module in cogview4_transformer.named_modules():
824
+ if "transformer_blocks" in name:
825
+ module.requires_grad_(True)
826
+ else:
827
+ module.requires_grad_(False)
828
+
829
+ def unwrap_model(model):
830
+ model = accelerator.unwrap_model(model)
831
+ model = model._orig_mod if is_compiled_module(model) else model
832
+ return model
833
+
834
+ # `accelerate` 0.16.0 will have better support for customized saving
835
+ if version.parse(accelerate.__version__) >= version.parse("0.16.0"):
836
+
837
+ def save_model_hook(models, weights, output_dir):
838
+ if accelerator.is_main_process:
839
+ for model in models:
840
+ if isinstance(unwrap_model(model), type(unwrap_model(cogview4_transformer))):
841
+ model = unwrap_model(model)
842
+ model.save_pretrained(os.path.join(output_dir, "transformer"))
843
+ else:
844
+ raise ValueError(f"unexpected save model: {model.__class__}")
845
+
846
+ # make sure to pop weight so that corresponding model is not saved again
847
+ if weights:
848
+ weights.pop()
849
+
850
+ def load_model_hook(models, input_dir):
851
+ transformer_ = None
852
+
853
+ if not accelerator.distributed_type == DistributedType.DEEPSPEED:
854
+ while len(models) > 0:
855
+ model = models.pop()
856
+
857
+ if isinstance(unwrap_model(model), type(unwrap_model(cogview4_transformer))):
858
+ transformer_ = model # noqa: F841
859
+ else:
860
+ raise ValueError(f"unexpected save model: {unwrap_model(model).__class__}")
861
+
862
+ else:
863
+ transformer_ = CogView4Transformer2DModel.from_pretrained(input_dir, subfolder="transformer") # noqa: F841
864
+
865
+ accelerator.register_save_state_pre_hook(save_model_hook)
866
+ accelerator.register_load_state_pre_hook(load_model_hook)
867
+
868
+ if args.gradient_checkpointing:
869
+ cogview4_transformer.enable_gradient_checkpointing()
870
+
871
+ # Enable TF32 for faster training on Ampere GPUs,
872
+ # cf https://pytorch.org/docs/stable/notes/cuda.html#tensorfloat-32-tf32-on-ampere-devices
873
+ if args.allow_tf32:
874
+ torch.backends.cuda.matmul.allow_tf32 = True
875
+
876
+ if args.scale_lr:
877
+ args.learning_rate = (
878
+ args.learning_rate * args.gradient_accumulation_steps * args.train_batch_size * accelerator.num_processes
879
+ )
880
+
881
+ # Use 8-bit Adam for lower memory usage or to fine-tune the model in 16GB GPUs
882
+ if args.use_8bit_adam:
883
+ try:
884
+ import bitsandbytes as bnb
885
+ except ImportError:
886
+ raise ImportError(
887
+ "To use 8-bit Adam, please install the bitsandbytes library: `pip install bitsandbytes`."
888
+ )
889
+
890
+ optimizer_class = bnb.optim.AdamW8bit
891
+ else:
892
+ optimizer_class = torch.optim.AdamW
893
+
894
+ # Optimization parameters
895
+ optimizer = optimizer_class(
896
+ cogview4_transformer.parameters(),
897
+ lr=args.learning_rate,
898
+ betas=(args.adam_beta1, args.adam_beta2),
899
+ weight_decay=args.adam_weight_decay,
900
+ eps=args.adam_epsilon,
901
+ )
902
+
903
+ # Prepare dataset and dataloader.
904
+ train_dataset = get_train_dataset(args, accelerator)
905
+ train_dataset = prepare_train_dataset(train_dataset, accelerator)
906
+ train_dataloader = torch.utils.data.DataLoader(
907
+ train_dataset,
908
+ shuffle=True,
909
+ collate_fn=collate_fn,
910
+ batch_size=args.train_batch_size,
911
+ num_workers=args.dataloader_num_workers,
912
+ )
913
+
914
+ # Scheduler and math around the number of training steps.
915
+ # Check the PR https://github.com/huggingface/diffusers/pull/8312 for detailed explanation.
916
+ if args.max_train_steps is None:
917
+ len_train_dataloader_after_sharding = math.ceil(len(train_dataloader) / accelerator.num_processes)
918
+ num_update_steps_per_epoch = math.ceil(len_train_dataloader_after_sharding / args.gradient_accumulation_steps)
919
+ num_training_steps_for_scheduler = (
920
+ args.num_train_epochs * num_update_steps_per_epoch * accelerator.num_processes
921
+ )
922
+ else:
923
+ num_training_steps_for_scheduler = args.max_train_steps * accelerator.num_processes
924
+
925
+ lr_scheduler = get_scheduler(
926
+ args.lr_scheduler,
927
+ optimizer=optimizer,
928
+ num_warmup_steps=args.lr_warmup_steps * accelerator.num_processes,
929
+ num_training_steps=args.max_train_steps * accelerator.num_processes,
930
+ num_cycles=args.lr_num_cycles,
931
+ power=args.lr_power,
932
+ )
933
+ # Prepare everything with our `accelerator`.
934
+ cogview4_transformer, optimizer, train_dataloader, lr_scheduler = accelerator.prepare(
935
+ cogview4_transformer, optimizer, train_dataloader, lr_scheduler
936
+ )
937
+
938
+ # We need to recalculate our total training steps as the size of the training dataloader may have changed.
939
+ num_update_steps_per_epoch = math.ceil(len(train_dataloader) / args.gradient_accumulation_steps)
940
+ if args.max_train_steps is None:
941
+ args.max_train_steps = args.num_train_epochs * num_update_steps_per_epoch
942
+ if num_training_steps_for_scheduler != args.max_train_steps * accelerator.num_processes:
943
+ logger.warning(
944
+ f"The length of the 'train_dataloader' after 'accelerator.prepare' ({len(train_dataloader)}) does not match "
945
+ f"the expected length ({len_train_dataloader_after_sharding}) when the learning rate scheduler was created. "
946
+ f"This inconsistency may result in the learning rate scheduler not functioning properly."
947
+ )
948
+ # Afterwards we recalculate our number of training epochs
949
+ args.num_train_epochs = math.ceil(args.max_train_steps / num_update_steps_per_epoch)
950
+
951
+ # We need to initialize the trackers we use, and also store our configuration.
952
+ # The trackers initializes automatically on the main process.
953
+ if accelerator.is_main_process:
954
+ tracker_config = dict(vars(args))
955
+
956
+ # tensorboard cannot handle list types for config
957
+ tracker_config.pop("validation_prompt")
958
+ tracker_config.pop("validation_image")
959
+
960
+ accelerator.init_trackers(args.tracker_project_name, config=tracker_config)
961
+
962
+ # Train!
963
+ total_batch_size = args.train_batch_size * accelerator.num_processes * args.gradient_accumulation_steps
964
+
965
+ logger.info("***** Running training *****")
966
+ logger.info(f" Num examples = {len(train_dataset)}")
967
+ logger.info(f" Num batches each epoch = {len(train_dataloader)}")
968
+ logger.info(f" Num Epochs = {args.num_train_epochs}")
969
+ logger.info(f" Instantaneous batch size per device = {args.train_batch_size}")
970
+ logger.info(f" Total train batch size (w. parallel, distributed & accumulation) = {total_batch_size}")
971
+ logger.info(f" Gradient Accumulation steps = {args.gradient_accumulation_steps}")
972
+ logger.info(f" Total optimization steps = {args.max_train_steps}")
973
+ global_step = 0
974
+ first_epoch = 0
975
+
976
+ # Create a pipeline for text encoding. We will move this pipeline to GPU/CPU as needed.
977
+ text_encoding_pipeline = CogView4ControlPipeline.from_pretrained(
978
+ args.pretrained_model_name_or_path, transformer=None, vae=None, torch_dtype=weight_dtype
979
+ )
980
+ tokenizer = text_encoding_pipeline.tokenizer
981
+
982
+ # Potentially load in the weights and states from a previous save
983
+ if args.resume_from_checkpoint:
984
+ if args.resume_from_checkpoint != "latest":
985
+ path = os.path.basename(args.resume_from_checkpoint)
986
+ else:
987
+ # Get the most recent checkpoint
988
+ dirs = os.listdir(args.output_dir)
989
+ dirs = [d for d in dirs if d.startswith("checkpoint")]
990
+ dirs = sorted(dirs, key=lambda x: int(x.split("-")[1]))
991
+ path = dirs[-1] if len(dirs) > 0 else None
992
+
993
+ if path is None:
994
+ logger.info(f"Checkpoint '{args.resume_from_checkpoint}' does not exist. Starting a new training run.")
995
+ args.resume_from_checkpoint = None
996
+ initial_global_step = 0
997
+ else:
998
+ logger.info(f"Resuming from checkpoint {path}")
999
+ accelerator.load_state(os.path.join(args.output_dir, path))
1000
+ global_step = int(path.split("-")[1])
1001
+
1002
+ initial_global_step = global_step
1003
+ first_epoch = global_step // num_update_steps_per_epoch
1004
+ else:
1005
+ initial_global_step = 0
1006
+
1007
+ if accelerator.is_main_process and args.report_to == "wandb" and args.log_dataset_samples:
1008
+ logger.info("Logging some dataset samples.")
1009
+ formatted_images = []
1010
+ formatted_control_images = []
1011
+ all_prompts = []
1012
+ for i, batch in enumerate(train_dataloader):
1013
+ images = (batch["pixel_values"] + 1) / 2
1014
+ control_images = (batch["conditioning_pixel_values"] + 1) / 2
1015
+ prompts = batch["captions"]
1016
+
1017
+ if len(formatted_images) > 10:
1018
+ break
1019
+
1020
+ for img, control_img, prompt in zip(images, control_images, prompts):
1021
+ formatted_images.append(img)
1022
+ formatted_control_images.append(control_img)
1023
+ all_prompts.append(prompt)
1024
+
1025
+ logged_artifacts = []
1026
+ for img, control_img, prompt in zip(formatted_images, formatted_control_images, all_prompts):
1027
+ logged_artifacts.append(wandb.Image(control_img, caption="Conditioning"))
1028
+ logged_artifacts.append(wandb.Image(img, caption=prompt))
1029
+
1030
+ wandb_tracker = [tracker for tracker in accelerator.trackers if tracker.name == "wandb"]
1031
+ wandb_tracker[0].log({"dataset_samples": logged_artifacts})
1032
+
1033
+ progress_bar = tqdm(
1034
+ range(0, args.max_train_steps),
1035
+ initial=initial_global_step,
1036
+ desc="Steps",
1037
+ # Only show the progress bar once on each machine.
1038
+ disable=not accelerator.is_local_main_process,
1039
+ )
1040
+
1041
+ for epoch in range(first_epoch, args.num_train_epochs):
1042
+ cogview4_transformer.train()
1043
+ for step, batch in enumerate(train_dataloader):
1044
+ with accelerator.accumulate(cogview4_transformer):
1045
+ # Convert images to latent space
1046
+ # vae encode
1047
+ prompts = batch["captions"]
1048
+ attention_mask = tokenizer(
1049
+ prompts,
1050
+ padding="longest", # not use max length
1051
+ max_length=args.max_sequence_length,
1052
+ truncation=True,
1053
+ add_special_tokens=True,
1054
+ return_tensors="pt",
1055
+ ).attention_mask.float()
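+ # Only the attention mask is taken from the tokenizer here; the prompt embeddings themselves
+ # are computed further below through text_encoding_pipeline.encode_prompt.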
1056
+
1057
+ pixel_latents = encode_images(batch["pixel_values"], vae.to(accelerator.device), weight_dtype)
1058
+ control_latents = encode_images(
1059
+ batch["conditioning_pixel_values"], vae.to(accelerator.device), weight_dtype
1060
+ )
1061
+ if args.offload:
1062
+ vae.cpu()
1063
+
1064
+ # Sample a random timestep for each image
1065
+ # for weighting schemes where we sample timesteps non-uniformly
1066
+ bsz = pixel_latents.shape[0]
1067
+ noise = torch.randn_like(pixel_latents, device=accelerator.device, dtype=weight_dtype)
1068
+ u = compute_density_for_timestep_sampling(
1069
+ weighting_scheme=args.weighting_scheme,
1070
+ batch_size=bsz,
1071
+ logit_mean=args.logit_mean,
1072
+ logit_std=args.logit_std,
1073
+ mode_scale=args.mode_scale,
1074
+ )
1075
+
1076
+ # Add noise according to the CogView4 flow-matching schedule
1077
+ indices = (u * noise_scheduler_copy.config.num_train_timesteps).long()
1078
+ timesteps = noise_scheduler_copy.timesteps[indices].to(device=pixel_latents.device)
1079
+ sigmas = noise_scheduler_copy.sigmas[indices].to(device=pixel_latents.device)
1080
+ captions = batch["captions"]
1081
+ image_seq_lens = torch.tensor(
1082
+ pixel_latents.shape[2] * pixel_latents.shape[3] // patch_size**2,
1083
+ dtype=pixel_latents.dtype,
1084
+ device=pixel_latents.device,
1085
+ ) # H * W / VAE patch_size
1086
+ mu = torch.sqrt(image_seq_lens / 256)
1087
+ mu = mu * 0.75 + 0.25
1088
+ scale_factors = mu / (mu + (1 / sigmas - 1) ** 1.0).to(
1089
+ dtype=pixel_latents.dtype, device=pixel_latents.device
1090
+ )
1091
+ scale_factors = scale_factors.view(len(batch["captions"]), 1, 1, 1)
1092
+ noisy_model_input = (1.0 - scale_factors) * pixel_latents + scale_factors * noise
1093
+ concatenated_noisy_model_input = torch.cat([noisy_model_input, control_latents], dim=1)
1094
+ text_encoding_pipeline = text_encoding_pipeline.to("cuda")
1095
+
1096
+ with torch.no_grad():
1097
+ (
1098
+ prompt_embeds,
1099
+ pooled_prompt_embeds,
1100
+ ) = text_encoding_pipeline.encode_prompt(captions, "")
1101
+ original_size = (args.resolution, args.resolution)
1102
+ original_size = torch.tensor([original_size], dtype=prompt_embeds.dtype, device=prompt_embeds.device)
1103
+
1104
+ target_size = (args.resolution, args.resolution)
1105
+ target_size = torch.tensor([target_size], dtype=prompt_embeds.dtype, device=prompt_embeds.device)
1106
+
1107
+ target_size = target_size.repeat(len(batch["captions"]), 1)
1108
+ original_size = original_size.repeat(len(batch["captions"]), 1)
1109
+ crops_coords_top_left = torch.tensor([(0, 0)], dtype=prompt_embeds.dtype, device=prompt_embeds.device)
1110
+ crops_coords_top_left = crops_coords_top_left.repeat(len(batch["captions"]), 1)
1111
+
1112
+ # this could be optimized by not having to do any text encoding and just
1113
+ # doing zeros on specified shapes for `prompt_embeds` and `pooled_prompt_embeds`
1114
+ if args.proportion_empty_prompts and random.random() < args.proportion_empty_prompts:
1115
+ # Here, we directly pass 16 pad tokens from pooled_prompt_embeds to prompt_embeds.
1116
+ prompt_embeds = pooled_prompt_embeds
1117
+ if args.offload:
1118
+ text_encoding_pipeline = text_encoding_pipeline.to("cpu")
1119
+ # Predict.
1120
+ noise_pred_cond = cogview4_transformer(
1121
+ hidden_states=concatenated_noisy_model_input,
1122
+ encoder_hidden_states=prompt_embeds,
1123
+ timestep=timesteps,
1124
+ original_size=original_size,
1125
+ target_size=target_size,
1126
+ crop_coords=crops_coords_top_left,
1127
+ return_dict=False,
1128
+ attention_mask=attention_mask,
1129
+ )[0]
1130
+ # these weighting schemes use a uniform timestep sampling
1131
+ # and instead post-weight the loss
1132
+ weighting = compute_loss_weighting_for_sd3(weighting_scheme=args.weighting_scheme, sigmas=sigmas)
1133
+ # flow-matching loss
1134
+ target = noise - pixel_latents
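+ # With x_t = (1 - s) * x_0 + s * noise, the velocity d x_t / d s = noise - x_0, which is
+ # the regression target used above.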
1135
+
1136
+ weighting = weighting.view(len(batch["captions"]), 1, 1, 1)
1137
+ loss = torch.mean(
1138
+ (weighting.float() * (noise_pred_cond.float() - target.float()) ** 2).reshape(target.shape[0], -1),
1139
+ 1,
1140
+ )
1141
+ loss = loss.mean()
1142
+ accelerator.backward(loss)
1143
+
1144
+ if accelerator.sync_gradients:
1145
+ params_to_clip = cogview4_transformer.parameters()
1146
+ accelerator.clip_grad_norm_(params_to_clip, args.max_grad_norm)
1147
+ optimizer.step()
1148
+ lr_scheduler.step()
1149
+ optimizer.zero_grad()
1150
+
1151
+ # Checks if the accelerator has performed an optimization step behind the scenes
1152
+ if accelerator.sync_gradients:
1153
+ progress_bar.update(1)
1154
+ global_step += 1
1155
+
1156
+ # DeepSpeed requires saving weights on every device; saving weights only on the main process would cause issues.
1157
+ if accelerator.distributed_type == DistributedType.DEEPSPEED or accelerator.is_main_process:
1158
+ if global_step % args.checkpointing_steps == 0:
1159
+ # _before_ saving state, check if this save would set us over the `checkpoints_total_limit`
1160
+ if args.checkpoints_total_limit is not None:
1161
+ checkpoints = os.listdir(args.output_dir)
1162
+ checkpoints = [d for d in checkpoints if d.startswith("checkpoint")]
1163
+ checkpoints = sorted(checkpoints, key=lambda x: int(x.split("-")[1]))
1164
+
1165
+ # before we save the new checkpoint, we need to have at _most_ `checkpoints_total_limit - 1` checkpoints
1166
+ if len(checkpoints) >= args.checkpoints_total_limit:
1167
+ num_to_remove = len(checkpoints) - args.checkpoints_total_limit + 1
1168
+ removing_checkpoints = checkpoints[0:num_to_remove]
1169
+
1170
+ logger.info(
1171
+ f"{len(checkpoints)} checkpoints already exist, removing {len(removing_checkpoints)} checkpoints"
1172
+ )
1173
+ logger.info(f"removing checkpoints: {', '.join(removing_checkpoints)}")
1174
+
1175
+ for removing_checkpoint in removing_checkpoints:
1176
+ removing_checkpoint = os.path.join(args.output_dir, removing_checkpoint)
1177
+ shutil.rmtree(removing_checkpoint)
1178
+
1179
+ save_path = os.path.join(args.output_dir, f"checkpoint-{global_step}")
1180
+ accelerator.save_state(save_path)
1181
+ logger.info(f"Saved state to {save_path}")
1182
+
1183
+ if args.validation_prompt is not None and global_step % args.validation_steps == 0:
1184
+ image_logs = log_validation(
1185
+ cogview4_transformer=cogview4_transformer,
1186
+ args=args,
1187
+ accelerator=accelerator,
1188
+ weight_dtype=weight_dtype,
1189
+ step=global_step,
1190
+ )
1191
+
1192
+ logs = {"loss": loss.detach().item(), "lr": lr_scheduler.get_last_lr()[0]}
1193
+ progress_bar.set_postfix(**logs)
1194
+ accelerator.log(logs, step=global_step)
1195
+
1196
+ if global_step >= args.max_train_steps:
1197
+ break
1198
+
1199
+ # Create the pipeline using the trained modules and save it.
1200
+ accelerator.wait_for_everyone()
1201
+ if accelerator.is_main_process:
1202
+ cogview4_transformer = unwrap_model(cogview4_transformer)
1203
+ if args.upcast_before_saving:
1204
+ cogview4_transformer.to(torch.float32)
1205
+ cogview4_transformer.save_pretrained(args.output_dir)
1206
+
1207
+ del cogview4_transformer
1208
+ del text_encoding_pipeline
1209
+ del vae
1210
+ free_memory()
1211
+
1212
+ # Run a final round of validation.
1213
+ image_logs = None
1214
+ if args.validation_prompt is not None:
1215
+ image_logs = log_validation(
1216
+ cogview4_transformer=None,
1217
+ args=args,
1218
+ accelerator=accelerator,
1219
+ weight_dtype=weight_dtype,
1220
+ step=global_step,
1221
+ is_final_validation=True,
1222
+ )
1223
+
1224
+ if args.push_to_hub:
1225
+ save_model_card(
1226
+ repo_id,
1227
+ image_logs=image_logs,
1228
+ base_model=args.pretrained_model_name_or_path,
1229
+ repo_folder=args.output_dir,
1230
+ )
1231
+ upload_folder(
1232
+ repo_id=repo_id,
1233
+ folder_path=args.output_dir,
1234
+ commit_message="End of training",
1235
+ ignore_patterns=["step_*", "epoch_*", "checkpoint-*"],
1236
+ )
1237
+
1238
+ accelerator.end_training()
1239
+
1240
+
1241
+ if __name__ == "__main__":
1242
+ args = parse_args()
1243
+ main(args)
exp_code/1_benchmark/diffusers-WanS2V/examples/community/README.md ADDED
The diff for this file is too large to render. See raw diff
 
exp_code/1_benchmark/diffusers-WanS2V/examples/community/README_community_scripts.md ADDED
@@ -0,0 +1,439 @@
1
+ # Community Scripts
2
+
3
+ **Community scripts** consist of inference examples using Diffusers pipelines that have been added by the community.
4
+ Please have a look at the following table to get an overview of all community examples. Click on the **Code Example** to get a copy-and-paste code example that you can try out.
5
+ If a community script doesn't work as expected, please open an issue and ping the author on it.
6
+
7
+ | Example | Description | Code Example | Colab | Author |
8
+ |:--------------------------------------------------------------------------------------------------------------------------------------|:---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|:------------------------------------------------------------------------------------------|:-------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------:|
9
+ | Using IP-Adapter with Negative Noise | Using negative noise with IP-adapter to better control the generation (see the [original post](https://github.com/huggingface/diffusers/discussions/7167) on the forum for more details) | [IP-Adapter Negative Noise](#ip-adapter-negative-noise) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/ip_adapter_negative_noise.ipynb) | [Álvaro Somoza](https://github.com/asomoza)|
10
+ | Asymmetric Tiling | Configure seamless image tiling independently for the X and Y axes | [Asymmetric Tiling](#Asymmetric-Tiling ) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/asymetric_tiling.ipynb) | [alexisrolland](https://github.com/alexisrolland)|
11
+ | Prompt Scheduling Callback |Allows changing prompts during a generation | [Prompt Scheduling-Callback](#Prompt-Scheduling-Callback ) |[Notebook](https://github.com/huggingface/notebooks/blob/main/diffusers/prompt_scheduling_callback.ipynb) | [hlky](https://github.com/hlky)|
12
+
13
+
14
+ ## Example usages
15
+
16
+ ### IP Adapter Negative Noise
17
+
18
+ Diffusers pipelines are fully integrated with IP-Adapter, which allows you to prompt the diffusion model with an image. However, it does not support negative image prompts (there is no `negative_ip_adapter_image` argument) the same way it supports negative text prompts. When you pass an `ip_adapter_image`, it will create a zero-filled tensor as a negative image. This script shows you how to create negative noise from `ip_adapter_image` and use it to significantly improve the generation quality while preserving the composition of images.
19
+
20
+ [cubiq](https://github.com/cubiq) initially developed this feature in his [repository](https://github.com/cubiq/ComfyUI_IPAdapter_plus). The community script was contributed by [asomoza](https://github.com/Somoza). You can find more details about this experimentation in [this discussion](https://github.com/huggingface/diffusers/discussions/7167).
21
+
22
+ IP-Adapter without negative noise
23
+ |source|result|
24
+ |---|---|
25
+ |![20240229150812](https://github.com/huggingface/diffusers/assets/5442875/901d8bd8-7a59-4fe7-bda1-a0e0d6c7dffd)|![20240229163923_normal](https://github.com/huggingface/diffusers/assets/5442875/3432e25a-ece6-45f4-a3f4-fca354f40b5b)|
26
+
27
+ IP-Adapter with negative noise
28
+ |source|result|
29
+ |---|---|
30
+ |![20240229150812](https://github.com/huggingface/diffusers/assets/5442875/901d8bd8-7a59-4fe7-bda1-a0e0d6c7dffd)|![20240229163923](https://github.com/huggingface/diffusers/assets/5442875/736fd15a-36ba-40c0-a7d8-6ec1ac26f788)|
31
+
32
+ ```python
33
+ import torch
34
+
35
+ from diffusers import AutoencoderKL, DPMSolverMultistepScheduler, StableDiffusionXLPipeline
36
+ from diffusers.models import ImageProjection
37
+ from diffusers.utils import load_image
38
+
39
+
40
+ def encode_image(
41
+ image_encoder,
42
+ feature_extractor,
43
+ image,
44
+ device,
45
+ num_images_per_prompt,
46
+ output_hidden_states=None,
47
+ negative_image=None,
48
+ ):
49
+ dtype = next(image_encoder.parameters()).dtype
50
+
51
+ if not isinstance(image, torch.Tensor):
52
+ image = feature_extractor(image, return_tensors="pt").pixel_values
53
+
54
+ image = image.to(device=device, dtype=dtype)
55
+ if output_hidden_states:
56
+ image_enc_hidden_states = image_encoder(image, output_hidden_states=True).hidden_states[-2]
57
+ image_enc_hidden_states = image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
58
+
59
+ if negative_image is None:
60
+ uncond_image_enc_hidden_states = image_encoder(
61
+ torch.zeros_like(image), output_hidden_states=True
62
+ ).hidden_states[-2]
63
+ else:
64
+ if not isinstance(negative_image, torch.Tensor):
65
+ negative_image = feature_extractor(negative_image, return_tensors="pt").pixel_values
66
+ negative_image = negative_image.to(device=device, dtype=dtype)
67
+ uncond_image_enc_hidden_states = image_encoder(negative_image, output_hidden_states=True).hidden_states[-2]
68
+
69
+ uncond_image_enc_hidden_states = uncond_image_enc_hidden_states.repeat_interleave(num_images_per_prompt, dim=0)
70
+ return image_enc_hidden_states, uncond_image_enc_hidden_states
71
+ else:
72
+ image_embeds = image_encoder(image).image_embeds
73
+ image_embeds = image_embeds.repeat_interleave(num_images_per_prompt, dim=0)
74
+ uncond_image_embeds = torch.zeros_like(image_embeds)
75
+
76
+ return image_embeds, uncond_image_embeds
77
+
78
+
79
+ @torch.no_grad()
80
+ def prepare_ip_adapter_image_embeds(
81
+ unet,
82
+ image_encoder,
83
+ feature_extractor,
84
+ ip_adapter_image,
85
+ do_classifier_free_guidance,
86
+ device,
87
+ num_images_per_prompt,
88
+ ip_adapter_negative_image=None,
89
+ ):
90
+ if not isinstance(ip_adapter_image, list):
91
+ ip_adapter_image = [ip_adapter_image]
92
+
93
+ if len(ip_adapter_image) != len(unet.encoder_hid_proj.image_projection_layers):
94
+ raise ValueError(
95
+ f"`ip_adapter_image` must have same length as the number of IP Adapters. Got {len(ip_adapter_image)} images and {len(unet.encoder_hid_proj.image_projection_layers)} IP Adapters."
96
+ )
97
+
98
+ image_embeds = []
99
+ for single_ip_adapter_image, image_proj_layer in zip(
100
+ ip_adapter_image, unet.encoder_hid_proj.image_projection_layers
101
+ ):
102
+ output_hidden_state = not isinstance(image_proj_layer, ImageProjection)
103
+ single_image_embeds, single_negative_image_embeds = encode_image(
104
+ image_encoder,
105
+ feature_extractor,
106
+ single_ip_adapter_image,
107
+ device,
108
+ 1,
109
+ output_hidden_state,
110
+ negative_image=ip_adapter_negative_image,
111
+ )
112
+ single_image_embeds = torch.stack([single_image_embeds] * num_images_per_prompt, dim=0)
113
+ single_negative_image_embeds = torch.stack([single_negative_image_embeds] * num_images_per_prompt, dim=0)
114
+
115
+ if do_classifier_free_guidance:
116
+ single_image_embeds = torch.cat([single_negative_image_embeds, single_image_embeds])
117
+ single_image_embeds = single_image_embeds.to(device)
118
+
119
+ image_embeds.append(single_image_embeds)
120
+
121
+ return image_embeds
122
+
123
+
124
+ vae = AutoencoderKL.from_pretrained(
125
+ "madebyollin/sdxl-vae-fp16-fix",
126
+ torch_dtype=torch.float16,
127
+ ).to("cuda")
128
+
129
+ pipeline = StableDiffusionXLPipeline.from_pretrained(
130
+ "RunDiffusion/Juggernaut-XL-v9",
131
+ torch_dtype=torch.float16,
132
+ vae=vae,
133
+ variant="fp16",
134
+ ).to("cuda")
135
+
136
+ pipeline.scheduler = DPMSolverMultistepScheduler.from_config(pipeline.scheduler.config)
137
+ pipeline.scheduler.config.use_karras_sigmas = True
138
+
139
+ pipeline.load_ip_adapter(
140
+ "h94/IP-Adapter",
141
+ subfolder="sdxl_models",
142
+ weight_name="ip-adapter-plus_sdxl_vit-h.safetensors",
143
+ image_encoder_folder="models/image_encoder",
144
+ )
145
+ pipeline.set_ip_adapter_scale(0.7)
146
+
147
+ ip_image = load_image("source.png")
148
+ negative_ip_image = load_image("noise.png")
149
+
150
+ image_embeds = prepare_ip_adapter_image_embeds(
151
+ unet=pipeline.unet,
152
+ image_encoder=pipeline.image_encoder,
153
+ feature_extractor=pipeline.feature_extractor,
154
+ ip_adapter_image=[[ip_image]],
155
+ do_classifier_free_guidance=True,
156
+ device="cuda",
157
+ num_images_per_prompt=1,
158
+ ip_adapter_negative_image=negative_ip_image,
159
+ )
160
+
161
+
162
+ prompt = "cinematic photo of a cyborg in the city, 4k, high quality, intricate, highly detailed"
163
+ negative_prompt = "blurry, smooth, plastic"
164
+
165
+ image = pipeline(
166
+ prompt=prompt,
167
+ negative_prompt=negative_prompt,
168
+ ip_adapter_image_embeds=image_embeds,
169
+ guidance_scale=6.0,
170
+ num_inference_steps=25,
171
+ generator=torch.Generator(device="cpu").manual_seed(1556265306),
172
+ ).images[0]
173
+
174
+ image.save("result.png")
175
+ ```
176
+
177
+ ### Asymmetric Tiling
178
+ Stable Diffusion is not trained to generate seamless textures. However, you can use this simple script to add tiling to your generation. This script was contributed by [alexisrolland](https://github.com/alexisrolland). See more details in [this issue](https://github.com/huggingface/diffusers/issues/556).
179
+
180
+
181
+ |Generated|Tiled|
182
+ |---|---|
183
+ |![20240313003235_573631814](https://github.com/huggingface/diffusers/assets/5442875/eca174fb-06a4-464e-a3a7-00dbb024543e)|![wall](https://github.com/huggingface/diffusers/assets/5442875/b4aa774b-2a6a-4316-a8eb-8f30b5f4d024)|
184
+
185
+
186
+ ```py
187
+ import torch
188
+ from typing import Optional
189
+ from diffusers import StableDiffusionPipeline
190
+ from diffusers.models.lora import LoRACompatibleConv
191
+
192
+ def seamless_tiling(pipeline, x_axis, y_axis):
193
+ def asymmetric_conv2d_convforward(self, input: torch.Tensor, weight: torch.Tensor, bias: Optional[torch.Tensor] = None):
194
+ self.paddingX = (self._reversed_padding_repeated_twice[0], self._reversed_padding_repeated_twice[1], 0, 0)
195
+ self.paddingY = (0, 0, self._reversed_padding_repeated_twice[2], self._reversed_padding_repeated_twice[3])
196
+ working = torch.nn.functional.pad(input, self.paddingX, mode=x_mode)
197
+ working = torch.nn.functional.pad(working, self.paddingY, mode=y_mode)
198
+ return torch.nn.functional.conv2d(working, weight, bias, self.stride, torch.nn.modules.utils._pair(0), self.dilation, self.groups)
199
+ x_mode = 'circular' if x_axis else 'constant'
200
+ y_mode = 'circular' if y_axis else 'constant'
201
+ targets = [pipeline.vae, pipeline.text_encoder, pipeline.unet]
202
+ convolution_layers = []
203
+ for target in targets:
204
+ for module in target.modules():
205
+ if isinstance(module, torch.nn.Conv2d):
206
+ convolution_layers.append(module)
207
+ for layer in convolution_layers:
208
+ if isinstance(layer, LoRACompatibleConv) and layer.lora_layer is None:
209
+ layer.lora_layer = lambda * x: 0
210
+ layer._conv_forward = asymmetric_conv2d_convforward.__get__(layer, torch.nn.Conv2d)
211
+ return pipeline
212
+
213
+ pipeline = StableDiffusionPipeline.from_pretrained("stable-diffusion-v1-5/stable-diffusion-v1-5", torch_dtype=torch.float16, use_safetensors=True)
214
+ pipeline.enable_model_cpu_offload()
215
+ prompt = ["texture of a red brick wall"]
216
+ seed = 123456
217
+ generator = torch.Generator(device='cuda').manual_seed(seed)
218
+
219
+ pipeline = seamless_tiling(pipeline=pipeline, x_axis=True, y_axis=True)
220
+ image = pipeline(
221
+ prompt=prompt,
222
+ width=512,
223
+ height=512,
224
+ num_inference_steps=20,
225
+ guidance_scale=7,
226
+ num_images_per_prompt=1,
227
+ generator=generator
228
+ ).images[0]
229
+ seamless_tiling(pipeline=pipeline, x_axis=False, y_axis=False)
230
+
231
+ torch.cuda.empty_cache()
232
+ image.save('image.png')
233
+ ```
234
+
235
+ ### Prompt Scheduling callback
236
+
237
+ Prompt scheduling callback allows changing prompts during a generation, like [prompt editing in A1111](https://github.com/AUTOMATIC1111/stable-diffusion-webui/wiki/Features#prompt-editing)
238
+
239
+ ```python
240
+ from diffusers import StableDiffusionPipeline
241
+ from diffusers.callbacks import PipelineCallback, MultiPipelineCallbacks
242
+ from diffusers.configuration_utils import register_to_config
243
+ import torch
244
+ from typing import Any, Dict, Tuple, Union
245
+
246
+
247
+ class SDPromptSchedulingCallback(PipelineCallback):
248
+ @register_to_config
249
+ def __init__(
250
+ self,
251
+ encoded_prompt: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
252
+ cutoff_step_ratio=None,
253
+ cutoff_step_index=None,
254
+ ):
255
+ super().__init__(
256
+ cutoff_step_ratio=cutoff_step_ratio, cutoff_step_index=cutoff_step_index
257
+ )
258
+
259
+ tensor_inputs = ["prompt_embeds"]
260
+
261
+ def callback_fn(
262
+ self, pipeline, step_index, timestep, callback_kwargs
263
+ ) -> Dict[str, Any]:
264
+ cutoff_step_ratio = self.config.cutoff_step_ratio
265
+ cutoff_step_index = self.config.cutoff_step_index
266
+ if isinstance(self.config.encoded_prompt, tuple):
267
+ prompt_embeds, negative_prompt_embeds = self.config.encoded_prompt
268
+ else:
269
+ prompt_embeds = self.config.encoded_prompt
270
+
271
+ # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio
272
+ cutoff_step = (
273
+ cutoff_step_index
274
+ if cutoff_step_index is not None
275
+ else int(pipeline.num_timesteps * cutoff_step_ratio)
276
+ )
277
+
278
+ if step_index == cutoff_step:
279
+ if pipeline.do_classifier_free_guidance:
280
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
281
+ callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
282
+ return callback_kwargs
283
+
284
+
285
+ pipeline: StableDiffusionPipeline = StableDiffusionPipeline.from_pretrained(
286
+ "stable-diffusion-v1-5/stable-diffusion-v1-5",
287
+ torch_dtype=torch.float16,
288
+ variant="fp16",
289
+ use_safetensors=True,
290
+ ).to("cuda")
291
+ pipeline.safety_checker = None
292
+ pipeline.requires_safety_checker = False
293
+
294
+ callback = MultiPipelineCallbacks(
295
+ [
296
+ SDPromptSchedulingCallback(
297
+ encoded_prompt=pipeline.encode_prompt(
298
+ prompt=f"prompt {index}",
299
+ negative_prompt=f"negative prompt {index}",
300
+ device=pipeline._execution_device,
301
+ num_images_per_prompt=1,
302
+ # pipeline.do_classifier_free_guidance can't be accessed until after pipeline is ran
303
+ do_classifier_free_guidance=True,
304
+ ),
305
+ cutoff_step_index=index,
306
+ ) for index in range(1, 20)
307
+ ]
308
+ )
309
+
310
+ image = pipeline(
311
+ prompt="prompt"
312
+ negative_prompt="negative prompt",
313
+ callback_on_step_end=callback,
314
+ callback_on_step_end_tensor_inputs=["prompt_embeds"],
315
+ ).images[0]
316
+ torch.cuda.empty_cache()
317
+ image.save('image.png')
318
+ ```
319
+
320
+ ```python
321
+ from diffusers import StableDiffusionXLPipeline
322
+ from diffusers.callbacks import PipelineCallback, MultiPipelineCallbacks
323
+ from diffusers.configuration_utils import register_to_config
324
+ import torch
325
+ from typing import Any, Dict, Tuple, Union
326
+
327
+
328
+ class SDXLPromptSchedulingCallback(PipelineCallback):
329
+ @register_to_config
330
+ def __init__(
331
+ self,
332
+ encoded_prompt: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
333
+ add_text_embeds: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
334
+ add_time_ids: Union[torch.Tensor, Tuple[torch.Tensor, torch.Tensor]],
335
+ cutoff_step_ratio=None,
336
+ cutoff_step_index=None,
337
+ ):
338
+ super().__init__(
339
+ cutoff_step_ratio=cutoff_step_ratio, cutoff_step_index=cutoff_step_index
340
+ )
341
+
342
+ tensor_inputs = ["prompt_embeds", "add_text_embeds", "add_time_ids"]
343
+
344
+ def callback_fn(
345
+ self, pipeline, step_index, timestep, callback_kwargs
346
+ ) -> Dict[str, Any]:
347
+ cutoff_step_ratio = self.config.cutoff_step_ratio
348
+ cutoff_step_index = self.config.cutoff_step_index
349
+ if isinstance(self.config.encoded_prompt, tuple):
350
+ prompt_embeds, negative_prompt_embeds = self.config.encoded_prompt
351
+ else:
352
+ prompt_embeds = self.config.encoded_prompt
353
+ if isinstance(self.config.add_text_embeds, tuple):
354
+ add_text_embeds, negative_add_text_embeds = self.config.add_text_embeds
355
+ else:
356
+ add_text_embeds = self.config.add_text_embeds
357
+ if isinstance(self.config.add_time_ids, tuple):
358
+ add_time_ids, negative_add_time_ids = self.config.add_time_ids
359
+ else:
360
+ add_time_ids = self.config.add_time_ids
361
+
362
+ # Use cutoff_step_index if it's not None, otherwise use cutoff_step_ratio
363
+ cutoff_step = (
364
+ cutoff_step_index
365
+ if cutoff_step_index is not None
366
+ else int(pipeline.num_timesteps * cutoff_step_ratio)
367
+ )
368
+
369
+ if step_index == cutoff_step:
370
+ if pipeline.do_classifier_free_guidance:
371
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
372
+ add_text_embeds = torch.cat([negative_add_text_embeds, add_text_embeds])
373
+ add_time_ids = torch.cat([negative_add_time_ids, add_time_ids])
374
+ callback_kwargs[self.tensor_inputs[0]] = prompt_embeds
375
+ callback_kwargs[self.tensor_inputs[1]] = add_text_embeds
376
+ callback_kwargs[self.tensor_inputs[2]] = add_time_ids
377
+ return callback_kwargs
378
+
379
+
380
+ pipeline: StableDiffusionXLPipeline = StableDiffusionXLPipeline.from_pretrained(
381
+ "stabilityai/stable-diffusion-xl-base-1.0",
382
+ torch_dtype=torch.float16,
383
+ variant="fp16",
384
+ use_safetensors=True,
385
+ ).to("cuda")
386
+
387
+ callbacks = []
388
+ for index in range(1, 20):
389
+ (
390
+ prompt_embeds,
391
+ negative_prompt_embeds,
392
+ pooled_prompt_embeds,
393
+ negative_pooled_prompt_embeds,
394
+ ) = pipeline.encode_prompt(
395
+ prompt=f"prompt {index}",
396
+ negative_prompt=f"prompt {index}",
397
+ device=pipeline._execution_device,
398
+ num_images_per_prompt=1,
399
+ # pipeline.do_classifier_free_guidance can't be accessed until after pipeline is ran
400
+ do_classifier_free_guidance=True,
401
+ )
402
+ text_encoder_projection_dim = int(pooled_prompt_embeds.shape[-1])
403
+ add_time_ids = pipeline._get_add_time_ids(
404
+ (1024, 1024),
405
+ (0, 0),
406
+ (1024, 1024),
407
+ dtype=prompt_embeds.dtype,
408
+ text_encoder_projection_dim=text_encoder_projection_dim,
409
+ )
410
+ negative_add_time_ids = pipeline._get_add_time_ids(
411
+ (1024, 1024),
412
+ (0, 0),
413
+ (1024, 1024),
414
+ dtype=prompt_embeds.dtype,
415
+ text_encoder_projection_dim=text_encoder_projection_dim,
416
+ )
417
+ callbacks.append(
418
+ SDXLPromptSchedulingCallback(
419
+ encoded_prompt=(prompt_embeds, negative_prompt_embeds),
420
+ add_text_embeds=(pooled_prompt_embeds, negative_pooled_prompt_embeds),
421
+ add_time_ids=(add_time_ids, negative_add_time_ids),
422
+ cutoff_step_index=index,
423
+ )
424
+ )
425
+
426
+
427
+ callback = MultiPipelineCallbacks(callbacks)
428
+
429
+ image = pipeline(
430
+ prompt="prompt",
431
+ negative_prompt="negative prompt",
432
+ callback_on_step_end=callback,
433
+ callback_on_step_end_tensor_inputs=[
434
+ "prompt_embeds",
435
+ "add_text_embeds",
436
+ "add_time_ids",
437
+ ],
438
+ ).images[0]
439
+ ```
exp_code/1_benchmark/diffusers-WanS2V/examples/community/adaptive_mask_inpainting.py ADDED
@@ -0,0 +1,1469 @@
1
+ # Copyright 2023 The HuggingFace Team. All rights reserved.
2
+ #
3
+ # Licensed under the Apache License, Version 2.0 (the "License");
4
+ # you may not use this file except in compliance with the License.
5
+ # You may obtain a copy of the License at
6
+ #
7
+ # http://www.apache.org/licenses/LICENSE-2.0
8
+ #
9
+ # Unless required by applicable law or agreed to in writing, software
10
+ # distributed under the License is distributed on an "AS IS" BASIS,
11
+ # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
12
+ # See the License for the specific language governing permissions and
13
+ # limitations under the License.
14
+
15
+ # This model implementation is heavily inspired by https://github.com/haofanwang/ControlNet-for-Diffusers/
16
+
17
+ import inspect
18
+ import os
19
+ import shutil
20
+ from glob import glob
21
+ from typing import Any, Callable, Dict, List, Optional, Union
22
+
23
+ import cv2
24
+ import numpy as np
25
+ import PIL.Image
26
+ import requests
27
+ import torch
28
+ from detectron2.config import get_cfg
29
+ from detectron2.data import MetadataCatalog
30
+ from detectron2.engine import DefaultPredictor
31
+ from detectron2.projects import point_rend
32
+ from detectron2.structures.instances import Instances
33
+ from detectron2.utils.visualizer import ColorMode, Visualizer
34
+ from packaging import version
35
+ from tqdm import tqdm
36
+ from transformers import CLIPImageProcessor, CLIPTextModel, CLIPTokenizer
37
+
38
+ from diffusers.configuration_utils import FrozenDict
39
+ from diffusers.image_processor import VaeImageProcessor
40
+ from diffusers.loaders import FromSingleFileMixin, LoraLoaderMixin, TextualInversionLoaderMixin
41
+ from diffusers.models import AsymmetricAutoencoderKL, AutoencoderKL, UNet2DConditionModel
42
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline
43
+ from diffusers.pipelines.stable_diffusion import StableDiffusionPipelineOutput
44
+ from diffusers.schedulers import KarrasDiffusionSchedulers
45
+ from diffusers.utils import (
46
+ deprecate,
47
+ is_accelerate_available,
48
+ is_accelerate_version,
49
+ logging,
50
+ randn_tensor,
51
+ )
52
+
53
+
54
+ logger = logging.get_logger(__name__) # pylint: disable=invalid-name
55
+
56
+
57
+ AMI_INSTALL_MESSAGE = """
58
+
59
+ Example Demo of Adaptive Mask Inpainting
60
+
61
+ Beyond the Contact: Discovering Comprehensive Affordance for 3D Objects from Pre-trained 2D Diffusion Models
62
+ Kim et al.
63
+ ECCV-2024 (Oral)
64
+
65
+
66
+ Please prepare the environment via
67
+
68
+ ```
69
+ conda create --name ami python=3.9 -y
70
+ conda activate ami
71
+
72
+ conda install pytorch==1.10.1 torchvision==0.11.2 torchaudio==0.10.1 cudatoolkit=11.3 -c pytorch -c conda-forge -y
73
+ python -m pip install detectron2==0.6 -f https://dl.fbaipublicfiles.com/detectron2/wheels/cu113/torch1.10/index.html
74
+ pip install easydict
75
+ pip install diffusers==0.20.2 accelerate safetensors transformers
76
+ pip install setuptools==59.5.0
77
+ pip install opencv-python
78
+ pip install numpy==1.24.1
79
+ ```
80
+
81
+
82
+ Put the code inside the root of diffusers library (e.g., as '/home/username/diffusers/adaptive_mask_inpainting_example.py') and run the python code.
83
+
84
+
85
+
86
+
87
+ """
88
+
89
+
90
+ EXAMPLE_DOC_STRING = """
91
+ Examples:
92
+ ```py
93
+ >>> # !pip install transformers accelerate
94
+ >>> from diffusers import StableDiffusionControlNetInpaintPipeline, ControlNetModel, DDIMScheduler
95
+ >>> from diffusers.utils import load_image
96
+ >>> import numpy as np
97
+ >>> import torch
98
+
99
+ >>> init_image = load_image(
100
+ ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy.png"
101
+ ... )
102
+ >>> init_image = init_image.resize((512, 512))
103
+
104
+ >>> generator = torch.Generator(device="cpu").manual_seed(1)
105
+
106
+ >>> mask_image = load_image(
107
+ ... "https://huggingface.co/datasets/diffusers/test-arrays/resolve/main/stable_diffusion_inpaint/boy_mask.png"
108
+ ... )
109
+ >>> mask_image = mask_image.resize((512, 512))
110
+
111
+
112
+ >>> def make_inpaint_condition(image, image_mask):
113
+ ... image = np.array(image.convert("RGB")).astype(np.float32) / 255.0
114
+ ... image_mask = np.array(image_mask.convert("L")).astype(np.float32) / 255.0
115
+
116
+ ... assert image.shape[0:1] == image_mask.shape[0:1], "image and image_mask must have the same image size"
117
+ ... image[image_mask > 0.5] = -1.0 # set as masked pixel
118
+ ... image = np.expand_dims(image, 0).transpose(0, 3, 1, 2)
119
+ ... image = torch.from_numpy(image)
120
+ ... return image
121
+
122
+
123
+ >>> control_image = make_inpaint_condition(init_image, mask_image)
124
+
125
+ >>> controlnet = ControlNetModel.from_pretrained(
126
+ ... "lllyasviel/control_v11p_sd15_inpaint", torch_dtype=torch.float16
127
+ ... )
128
+ >>> pipe = StableDiffusionControlNetInpaintPipeline.from_pretrained(
129
+ ... "runwayml/stable-diffusion-v1-5", controlnet=controlnet, torch_dtype=torch.float16
130
+ ... )
131
+
132
+ >>> pipe.scheduler = DDIMScheduler.from_config(pipe.scheduler.config)
133
+ >>> pipe.enable_model_cpu_offload()
134
+
135
+ >>> # generate image
136
+ >>> image = pipe(
137
+ ... "a handsome man with ray-ban sunglasses",
138
+ ... num_inference_steps=20,
139
+ ... generator=generator,
140
+ ... eta=1.0,
141
+ ... image=init_image,
142
+ ... mask_image=mask_image,
143
+ ... control_image=control_image,
144
+ ... ).images[0]
145
+ ```
146
+ """
147
+
148
+
149
+ def download_file(url, output_file, exist_ok: bool):
150
+ if exist_ok and os.path.exists(output_file):
151
+ return
152
+
153
+ response = requests.get(url, stream=True)
154
+
155
+ with open(output_file, "wb") as file:
156
+ for chunk in tqdm(response.iter_content(chunk_size=8192), desc=f"Downloading '{output_file}'..."):
157
+ if chunk:
158
+ file.write(chunk)
159
+
160
+
161
+ def generate_video_from_imgs(images_save_directory, fps=15.0, delete_dir=True):
162
+ # delete videos if exists
163
+ if os.path.exists(f"{images_save_directory}.mp4"):
164
+ os.remove(f"{images_save_directory}.mp4")
165
+ if os.path.exists(f"{images_save_directory}_before_process.mp4"):
166
+ os.remove(f"{images_save_directory}_before_process.mp4")
167
+
168
+ # assume there are "enumerated" images under "images_save_directory"
169
+ assert os.path.isdir(images_save_directory)
170
+ ImgPaths = sorted(glob(f"{images_save_directory}/*"))
171
+
172
+ if len(ImgPaths) == 0:
173
+ print("\tSkipping, since there must be at least one image to create mp4\n")
174
+ else:
175
+ # mp4 configuration
176
+ video_path = images_save_directory + "_before_process.mp4"
177
+
178
+ # Get height and width config
179
+ images = sorted([ImgPath.split("/")[-1] for ImgPath in ImgPaths if ImgPath.endswith(".png")])
180
+ frame = cv2.imread(os.path.join(images_save_directory, images[0]))
181
+ height, width, channels = frame.shape
182
+
183
+ # create mp4 video writer
184
+ fourcc = cv2.VideoWriter_fourcc(*"mp4v")
185
+ video = cv2.VideoWriter(video_path, fourcc, fps, (width, height))
186
+ for image in images:
187
+ video.write(cv2.imread(os.path.join(images_save_directory, image)))
188
+ cv2.destroyAllWindows()
189
+ video.release()
190
+
191
+ # The generated video is not HTML5-compatible; re-encode it with ffmpeg (libx264) so it can be embedded in HTML.
192
+ os.system(
193
+ f'ffmpeg -i "{images_save_directory}_before_process.mp4" -vcodec libx264 -f mp4 "{images_save_directory}.mp4" '
194
+ )
195
+
196
+ # remove group of images, and remove video before post-process.
197
+ if delete_dir and os.path.exists(images_save_directory):
198
+ shutil.rmtree(images_save_directory)
199
+ # remove 'before-process' video
200
+ if os.path.exists(f"{images_save_directory}_before_process.mp4"):
201
+ os.remove(f"{images_save_directory}_before_process.mp4")
202
+
203
+
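+ # Hedged usage sketch for the helpers above (the directory name is an assumption for illustration):
+ # given enumerated PNG frames under "./adaptive_mask_vis/", the call below stitches them into
+ # "./adaptive_mask_vis.mp4" via OpenCV + ffmpeg and, with delete_dir=True, removes the frame directory.
+ #
+ #   generate_video_from_imgs("./adaptive_mask_vis", fps=10.0, delete_dir=False)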
204
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_inpaint.prepare_mask_and_masked_image
205
+ def prepare_mask_and_masked_image(image, mask, height, width, return_image=False):
206
+ """
207
+ Prepares a pair (image, mask) to be consumed by the Stable Diffusion pipeline. This means that those inputs will be
208
+ converted to ``torch.Tensor`` with shapes ``batch x channels x height x width`` where ``channels`` is ``3`` for the
209
+ ``image`` and ``1`` for the ``mask``.
210
+
211
+ The ``image`` will be converted to ``torch.float32`` and normalized to be in ``[-1, 1]``. The ``mask`` will be
212
+ binarized (``mask > 0.5``) and cast to ``torch.float32`` too.
213
+
214
+ Args:
215
+ image (Union[np.array, PIL.Image, torch.Tensor]): The image to inpaint.
216
+ It can be a ``PIL.Image``, or a ``height x width x 3`` ``np.array`` or a ``channels x height x width``
217
+ ``torch.Tensor`` or a ``batch x channels x height x width`` ``torch.Tensor``.
218
+ mask (Union[np.array, PIL.Image, torch.Tensor]): The mask to apply to the image, i.e. regions to inpaint.
219
+ It can be a ``PIL.Image``, or a ``height x width`` ``np.array`` or a ``1 x height x width``
220
+ ``torch.Tensor`` or a ``batch x 1 x height x width`` ``torch.Tensor``.
221
+
222
+
223
+ Raises:
224
+ ValueError: ``torch.Tensor`` images should be in the ``[-1, 1]`` range. ValueError: ``torch.Tensor`` mask
225
+ should be in the ``[0, 1]`` range. ValueError: ``mask`` and ``image`` should have the same spatial dimensions.
226
+ TypeError: ``mask`` is a ``torch.Tensor`` but ``image`` is not
227
+ (or the other way around).
228
+
229
+ Returns:
230
+ tuple[torch.Tensor]: The pair (mask, masked_image) as ``torch.Tensor`` with 4
231
+ dimensions: ``batch x channels x height x width``.
232
+ """
233
+
234
+ if image is None:
235
+ raise ValueError("`image` input cannot be undefined.")
236
+
237
+ if mask is None:
238
+ raise ValueError("`mask_image` input cannot be undefined.")
239
+
240
+ if isinstance(image, torch.Tensor):
241
+ if not isinstance(mask, torch.Tensor):
242
+ raise TypeError(f"`image` is a torch.Tensor but `mask` (type: {type(mask)} is not")
243
+
244
+ # Batch single image
245
+ if image.ndim == 3:
246
+ assert image.shape[0] == 3, "Image outside a batch should be of shape (3, H, W)"
247
+ image = image.unsqueeze(0)
248
+
249
+ # Batch and add channel dim for single mask
250
+ if mask.ndim == 2:
251
+ mask = mask.unsqueeze(0).unsqueeze(0)
252
+
253
+ # Batch single mask or add channel dim
254
+ if mask.ndim == 3:
255
+ # Single batched mask, no channel dim or single mask not batched but channel dim
256
+ if mask.shape[0] == 1:
257
+ mask = mask.unsqueeze(0)
258
+
259
+ # Batched masks no channel dim
260
+ else:
261
+ mask = mask.unsqueeze(1)
262
+
263
+ assert image.ndim == 4 and mask.ndim == 4, "Image and Mask must have 4 dimensions"
264
+ assert image.shape[-2:] == mask.shape[-2:], "Image and Mask must have the same spatial dimensions"
265
+ assert image.shape[0] == mask.shape[0], "Image and Mask must have the same batch size"
266
+
267
+ # Check image is in [-1, 1]
268
+ if image.min() < -1 or image.max() > 1:
269
+ raise ValueError("Image should be in [-1, 1] range")
270
+
271
+ # Check mask is in [0, 1]
272
+ if mask.min() < 0 or mask.max() > 1:
273
+ raise ValueError("Mask should be in [0, 1] range")
274
+
275
+ # Binarize mask
276
+ mask[mask < 0.5] = 0
277
+ mask[mask >= 0.5] = 1
278
+
279
+ # Image as float32
280
+ image = image.to(dtype=torch.float32)
281
+ elif isinstance(mask, torch.Tensor):
282
+ raise TypeError(f"`mask` is a torch.Tensor but `image` (type: {type(image)} is not")
283
+ else:
284
+ # preprocess image
285
+ if isinstance(image, (PIL.Image.Image, np.ndarray)):
286
+ image = [image]
287
+ if isinstance(image, list) and isinstance(image[0], PIL.Image.Image):
288
+ # resize all images w.r.t. the passed height and width
289
+ image = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in image]
290
+ image = [np.array(i.convert("RGB"))[None, :] for i in image]
291
+ image = np.concatenate(image, axis=0)
292
+ elif isinstance(image, list) and isinstance(image[0], np.ndarray):
293
+ image = np.concatenate([i[None, :] for i in image], axis=0)
294
+
295
+ image = image.transpose(0, 3, 1, 2)
296
+ image = torch.from_numpy(image).to(dtype=torch.float32) / 127.5 - 1.0
297
+
298
+ # preprocess mask
299
+ if isinstance(mask, (PIL.Image.Image, np.ndarray)):
300
+ mask = [mask]
301
+
302
+ if isinstance(mask, list) and isinstance(mask[0], PIL.Image.Image):
303
+ mask = [i.resize((width, height), resample=PIL.Image.LANCZOS) for i in mask]
304
+ mask = np.concatenate([np.array(m.convert("L"))[None, None, :] for m in mask], axis=0)
305
+ mask = mask.astype(np.float32) / 255.0
306
+ elif isinstance(mask, list) and isinstance(mask[0], np.ndarray):
307
+ mask = np.concatenate([m[None, None, :] for m in mask], axis=0)
308
+
309
+ mask[mask < 0.5] = 0
310
+ mask[mask >= 0.5] = 1
311
+ mask = torch.from_numpy(mask)
312
+
313
+ masked_image = image * (mask < 0.5)
314
+
315
+ # n.b. ensure backwards compatibility as old function does not return image
316
+ if return_image:
317
+ return mask, masked_image, image
318
+
319
+ return mask, masked_image
320
+
321
+
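+ # Hedged usage sketch for `prepare_mask_and_masked_image` (the file names below are assumptions,
+ # for illustration only; they are not shipped with this example):
+ #
+ #   init_image = PIL.Image.open("person.png").convert("RGB")
+ #   mask_image = PIL.Image.open("person_mask.png").convert("L")
+ #   mask, masked_image = prepare_mask_and_masked_image(init_image, mask_image, 512, 512)
+ #   # mask:         torch.float32, (1, 1, 512, 512), values in {0, 1}
+ #   # masked_image: torch.float32, (1, 3, 512, 512), values in [-1, 1] with masked pixels zeroed out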
322
+ class AdaptiveMaskInpaintPipeline(
323
+ DiffusionPipeline, TextualInversionLoaderMixin, LoraLoaderMixin, FromSingleFileMixin
324
+ ):
325
+ r"""
326
+ Pipeline for text-guided image inpainting using Stable Diffusion.
327
+
328
+ This model inherits from [`DiffusionPipeline`]. Check the superclass documentation for the generic methods
329
+ implemented for all pipelines (downloading, saving, running on a particular device, etc.).
330
+
331
+ The pipeline also inherits the following loading methods:
332
+ - [`~loaders.TextualInversionLoaderMixin.load_textual_inversion`] for loading textual inversion embeddings
333
+ - [`~loaders.LoraLoaderMixin.load_lora_weights`] for loading LoRA weights
334
+ - [`~loaders.LoraLoaderMixin.save_lora_weights`] for saving LoRA weights
335
+
336
+ Args:
337
+ vae ([`AutoencoderKL`, `AsymmetricAutoencoderKL`]):
338
+ Variational Auto-Encoder (VAE) Model to encode and decode images to and from latent representations.
339
+ text_encoder ([`CLIPTextModel`]):
340
+ Frozen text-encoder ([clip-vit-large-patch14](https://huggingface.co/openai/clip-vit-large-patch14)).
341
+ tokenizer ([`~transformers.CLIPTokenizer`]):
342
+ A `CLIPTokenizer` to tokenize text.
343
+ unet ([`UNet2DConditionModel`]):
344
+ A `UNet2DConditionModel` to denoise the encoded image latents.
345
+ scheduler ([`SchedulerMixin`]):
346
+ A scheduler to be used in combination with `unet` to denoise the encoded image latents. Can be one of
347
+ [`DDIMScheduler`], [`LMSDiscreteScheduler`], or [`PNDMScheduler`].
348
+ safety_checker ([`StableDiffusionSafetyChecker`]):
349
+ Classification module that estimates whether generated images could be considered offensive or harmful.
350
+ Please refer to the [model card](https://huggingface.co/runwayml/stable-diffusion-v1-5) for more details
351
+ about a model's potential harms.
352
+ feature_extractor ([`~transformers.CLIPImageProcessor`]):
353
+ A `CLIPImageProcessor` to extract features from generated images; used as inputs to the `safety_checker`.
354
+ """
355
+
356
+ _optional_components = ["safety_checker", "feature_extractor"]
357
+
358
+ def __init__(
359
+ self,
360
+ vae: Union[AutoencoderKL, AsymmetricAutoencoderKL],
361
+ text_encoder: CLIPTextModel,
362
+ tokenizer: CLIPTokenizer,
363
+ unet: UNet2DConditionModel,
364
+ scheduler: KarrasDiffusionSchedulers,
365
+ # safety_checker: StableDiffusionSafetyChecker,
366
+ safety_checker,
367
+ feature_extractor: CLIPImageProcessor,
368
+ requires_safety_checker: bool = True,
369
+ ):
370
+ super().__init__()
371
+
372
+ self.register_adaptive_mask_model()
373
+ self.register_adaptive_mask_settings()
374
+
375
+ if scheduler is not None and getattr(scheduler.config, "steps_offset", 1) != 1:
376
+ deprecation_message = (
377
+ f"The configuration file of this scheduler: {scheduler} is outdated. `steps_offset`"
378
+ f" should be set to 1 instead of {scheduler.config.steps_offset}. Please make sure "
379
+ "to update the config accordingly as leaving `steps_offset` might led to incorrect results"
380
+ " in future versions. If you have downloaded this checkpoint from the Hugging Face Hub,"
381
+ " it would be very nice if you could open a Pull request for the `scheduler/scheduler_config.json`"
382
+ " file"
383
+ )
384
+ deprecate("steps_offset!=1", "1.0.0", deprecation_message, standard_warn=False)
385
+ new_config = dict(scheduler.config)
386
+ new_config["steps_offset"] = 1
387
+ scheduler._internal_dict = FrozenDict(new_config)
388
+
389
+ if scheduler is not None and getattr(scheduler.config, "skip_prk_steps", True) is False:
390
+ deprecation_message = (
391
+ f"The configuration file of this scheduler: {scheduler} has not set the configuration"
392
+ " `skip_prk_steps`. `skip_prk_steps` should be set to True in the configuration file. Please make"
393
+ " sure to update the config accordingly as not setting `skip_prk_steps` in the config might lead to"
394
+ " incorrect results in future versions. If you have downloaded this checkpoint from the Hugging Face"
395
+ " Hub, it would be very nice if you could open a Pull request for the"
396
+ " `scheduler/scheduler_config.json` file"
397
+ )
398
+ deprecate("skip_prk_steps not set", "1.0.0", deprecation_message, standard_warn=False)
399
+ new_config = dict(scheduler.config)
400
+ new_config["skip_prk_steps"] = True
401
+ scheduler._internal_dict = FrozenDict(new_config)
402
+
403
+ if safety_checker is None and requires_safety_checker:
404
+ logger.warning(
405
+ f"You have disabled the safety checker for {self.__class__} by passing `safety_checker=None`. Ensure"
406
+ " that you abide to the conditions of the Stable Diffusion license and do not expose unfiltered"
407
+ " results in services or applications open to the public. Both the diffusers team and Hugging Face"
408
+ " strongly recommend to keep the safety filter enabled in all public facing circumstances, disabling"
409
+ " it only for use-cases that involve analyzing network behavior or auditing its results. For more"
410
+ " information, please have a look at https://github.com/huggingface/diffusers/pull/254 ."
411
+ )
412
+
413
+ if safety_checker is not None and feature_extractor is None:
414
+ raise ValueError(
415
+ "Make sure to define a feature extractor when loading {self.__class__} if you want to use the safety"
416
+ " checker. If you do not want to use the safety checker, you can pass `'safety_checker=None'` instead."
417
+ )
418
+
419
+ is_unet_version_less_0_9_0 = (
420
+ unet is not None
421
+ and hasattr(unet.config, "_diffusers_version")
422
+ and version.parse(version.parse(unet.config._diffusers_version).base_version) < version.parse("0.9.0.dev0")
423
+ )
424
+ is_unet_sample_size_less_64 = (
425
+ unet is not None and hasattr(unet.config, "sample_size") and unet.config.sample_size < 64
426
+ )
427
+ if is_unet_version_less_0_9_0 and is_unet_sample_size_less_64:
428
+ deprecation_message = (
429
+ "The configuration file of the unet has set the default `sample_size` to smaller than"
430
+ " 64 which seems highly unlikely .If you're checkpoint is a fine-tuned version of any of the"
431
+ " following: \n- CompVis/stable-diffusion-v1-4 \n- CompVis/stable-diffusion-v1-3 \n-"
432
+ " CompVis/stable-diffusion-v1-2 \n- CompVis/stable-diffusion-v1-1 \n- runwayml/stable-diffusion-v1-5"
433
+ " \n- runwayml/stable-diffusion-inpainting \n you should change 'sample_size' to 64 in the"
434
+ " configuration file. Please make sure to update the config accordingly as leaving `sample_size=32`"
435
+ " in the config might lead to incorrect results in future versions. If you have downloaded this"
436
+ " checkpoint from the Hugging Face Hub, it would be very nice if you could open a Pull request for"
437
+ " the `unet/config.json` file"
438
+ )
439
+ deprecate("sample_size<64", "1.0.0", deprecation_message, standard_warn=False)
440
+ new_config = dict(unet.config)
441
+ new_config["sample_size"] = 64
442
+ unet._internal_dict = FrozenDict(new_config)
443
+
444
+ # Check shapes, assume num_channels_latents == 4, num_channels_mask == 1, num_channels_masked == 4
445
+ if unet is not None and unet.config.in_channels != 9:
446
+ logger.info(f"You have loaded a UNet with {unet.config.in_channels} input channels which.")
447
+
448
+ self.register_modules(
449
+ vae=vae,
450
+ text_encoder=text_encoder,
451
+ tokenizer=tokenizer,
452
+ unet=unet,
453
+ scheduler=scheduler,
454
+ safety_checker=safety_checker,
455
+ feature_extractor=feature_extractor,
456
+ )
457
+ self.vae_scale_factor = 2 ** (len(self.vae.config.block_out_channels) - 1) if getattr(self, "vae", None) else 8
458
+ self.image_processor = VaeImageProcessor(vae_scale_factor=self.vae_scale_factor)
459
+ self.register_to_config(requires_safety_checker=requires_safety_checker)
460
+
461
+ """ Preparation for Adaptive Mask inpainting """
462
+
463
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.enable_model_cpu_offload
464
+ def enable_model_cpu_offload(self, gpu_id=0):
465
+ r"""
466
+ Offload all models to CPU to reduce memory usage with a low impact on performance. Moves one whole model at a
467
+ time to the GPU when its `forward` method is called, and the model remains on the GPU until the next model runs.
468
+ Memory savings are lower than using `enable_sequential_cpu_offload`, but performance is much better due to the
469
+ iterative execution of the `unet`.
470
+ """
471
+ if is_accelerate_available() and is_accelerate_version(">=", "0.17.0.dev0"):
472
+ from accelerate import cpu_offload_with_hook
473
+ else:
474
+ raise ImportError("`enable_model_cpu_offload` requires `accelerate v0.17.0` or higher.")
475
+
476
+ device = torch.device(f"cuda:{gpu_id}")
477
+
478
+ if self.device.type != "cpu":
479
+ self.to("cpu", silence_dtype_warnings=True)
480
+ torch.cuda.empty_cache() # otherwise we don't see the memory savings (but they probably exist)
481
+
482
+ hook = None
483
+ for cpu_offloaded_model in [self.text_encoder, self.unet, self.vae]:
484
+ _, hook = cpu_offload_with_hook(cpu_offloaded_model, device, prev_module_hook=hook)
485
+
486
+ if self.safety_checker is not None:
487
+ _, hook = cpu_offload_with_hook(self.safety_checker, device, prev_module_hook=hook)
488
+
489
+ # We'll offload the last model manually.
490
+ self.final_offload_hook = hook
491
+
492
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline._encode_prompt
493
+ def _encode_prompt(
494
+ self,
495
+ prompt,
496
+ device,
497
+ num_images_per_prompt,
498
+ do_classifier_free_guidance,
499
+ negative_prompt=None,
500
+ prompt_embeds: Optional[torch.FloatTensor] = None,
501
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
502
+ lora_scale: Optional[float] = None,
503
+ ):
504
+ r"""
505
+ Encodes the prompt into text encoder hidden states.
506
+
507
+ Args:
508
+ prompt (`str` or `List[str]`, *optional*):
509
+ prompt to be encoded
510
+ device: (`torch.device`):
511
+ torch device
512
+ num_images_per_prompt (`int`):
513
+ number of images that should be generated per prompt
514
+ do_classifier_free_guidance (`bool`):
515
+ whether to use classifier free guidance or not
516
+ negative_prompt (`str` or `List[str]`, *optional*):
517
+ The prompt or prompts not to guide the image generation. If not defined, one has to pass
518
+ `negative_prompt_embeds` instead. Ignored when not using guidance (i.e., ignored if `guidance_scale` is
519
+ less than `1`).
520
+ prompt_embeds (`torch.FloatTensor`, *optional*):
521
+ Pre-generated text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt weighting. If not
522
+ provided, text embeddings will be generated from `prompt` input argument.
523
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
524
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs, *e.g.* prompt
525
+ weighting. If not provided, negative_prompt_embeds will be generated from `negative_prompt` input
526
+ argument.
527
+ lora_scale (`float`, *optional*):
528
+ A lora scale that will be applied to all LoRA layers of the text encoder if LoRA layers are loaded.
529
+ """
530
+ # set lora scale so that monkey patched LoRA
531
+ # function of text encoder can correctly access it
532
+ if lora_scale is not None and isinstance(self, LoraLoaderMixin):
533
+ self._lora_scale = lora_scale
534
+
535
+ if prompt is not None and isinstance(prompt, str):
536
+ batch_size = 1
537
+ elif prompt is not None and isinstance(prompt, list):
538
+ batch_size = len(prompt)
539
+ else:
540
+ batch_size = prompt_embeds.shape[0]
541
+
542
+ if prompt_embeds is None:
543
+ # textual inversion: process multi-vector tokens if necessary
544
+ if isinstance(self, TextualInversionLoaderMixin):
545
+ prompt = self.maybe_convert_prompt(prompt, self.tokenizer)
546
+
547
+ text_inputs = self.tokenizer(
548
+ prompt,
549
+ padding="max_length",
550
+ max_length=self.tokenizer.model_max_length,
551
+ truncation=True,
552
+ return_tensors="pt",
553
+ )
554
+ text_input_ids = text_inputs.input_ids
555
+ untruncated_ids = self.tokenizer(prompt, padding="longest", return_tensors="pt").input_ids
556
+
557
+ if untruncated_ids.shape[-1] >= text_input_ids.shape[-1] and not torch.equal(
558
+ text_input_ids, untruncated_ids
559
+ ):
560
+ removed_text = self.tokenizer.batch_decode(
561
+ untruncated_ids[:, self.tokenizer.model_max_length - 1 : -1]
562
+ )
563
+ logger.warning(
564
+ "The following part of your input was truncated because CLIP can only handle sequences up to"
565
+ f" {self.tokenizer.model_max_length} tokens: {removed_text}"
566
+ )
567
+
568
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
569
+ attention_mask = text_inputs.attention_mask.to(device)
570
+ else:
571
+ attention_mask = None
572
+
573
+ prompt_embeds = self.text_encoder(
574
+ text_input_ids.to(device),
575
+ attention_mask=attention_mask,
576
+ )
577
+ prompt_embeds = prompt_embeds[0]
578
+
579
+ if self.text_encoder is not None:
580
+ prompt_embeds_dtype = self.text_encoder.dtype
581
+ elif self.unet is not None:
582
+ prompt_embeds_dtype = self.unet.dtype
583
+ else:
584
+ prompt_embeds_dtype = prompt_embeds.dtype
585
+
586
+ prompt_embeds = prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
587
+
588
+ bs_embed, seq_len, _ = prompt_embeds.shape
589
+ # duplicate text embeddings for each generation per prompt, using mps friendly method
590
+ prompt_embeds = prompt_embeds.repeat(1, num_images_per_prompt, 1)
591
+ prompt_embeds = prompt_embeds.view(bs_embed * num_images_per_prompt, seq_len, -1)
592
+
593
+ # get unconditional embeddings for classifier free guidance
594
+ if do_classifier_free_guidance and negative_prompt_embeds is None:
595
+ uncond_tokens: List[str]
596
+ if negative_prompt is None:
597
+ uncond_tokens = [""] * batch_size
598
+ elif prompt is not None and type(prompt) is not type(negative_prompt):
599
+ raise TypeError(
600
+ f"`negative_prompt` should be the same type to `prompt`, but got {type(negative_prompt)} !="
601
+ f" {type(prompt)}."
602
+ )
603
+ elif isinstance(negative_prompt, str):
604
+ uncond_tokens = [negative_prompt]
605
+ elif batch_size != len(negative_prompt):
606
+ raise ValueError(
607
+ f"`negative_prompt`: {negative_prompt} has batch size {len(negative_prompt)}, but `prompt`:"
608
+ f" {prompt} has batch size {batch_size}. Please make sure that passed `negative_prompt` matches"
609
+ " the batch size of `prompt`."
610
+ )
611
+ else:
612
+ uncond_tokens = negative_prompt
613
+
614
+ # textual inversion: process multi-vector tokens if necessary
615
+ if isinstance(self, TextualInversionLoaderMixin):
616
+ uncond_tokens = self.maybe_convert_prompt(uncond_tokens, self.tokenizer)
617
+
618
+ max_length = prompt_embeds.shape[1]
619
+ uncond_input = self.tokenizer(
620
+ uncond_tokens,
621
+ padding="max_length",
622
+ max_length=max_length,
623
+ truncation=True,
624
+ return_tensors="pt",
625
+ )
626
+
627
+ if hasattr(self.text_encoder.config, "use_attention_mask") and self.text_encoder.config.use_attention_mask:
628
+ attention_mask = uncond_input.attention_mask.to(device)
629
+ else:
630
+ attention_mask = None
631
+
632
+ negative_prompt_embeds = self.text_encoder(
633
+ uncond_input.input_ids.to(device),
634
+ attention_mask=attention_mask,
635
+ )
636
+ negative_prompt_embeds = negative_prompt_embeds[0]
637
+
638
+ if do_classifier_free_guidance:
639
+ # duplicate unconditional embeddings for each generation per prompt, using mps friendly method
640
+ seq_len = negative_prompt_embeds.shape[1]
641
+
642
+ negative_prompt_embeds = negative_prompt_embeds.to(dtype=prompt_embeds_dtype, device=device)
643
+
644
+ negative_prompt_embeds = negative_prompt_embeds.repeat(1, num_images_per_prompt, 1)
645
+ negative_prompt_embeds = negative_prompt_embeds.view(batch_size * num_images_per_prompt, seq_len, -1)
646
+
647
+ # For classifier free guidance, we need to do two forward passes.
648
+ # Here we concatenate the unconditional and text embeddings into a single batch
649
+ # to avoid doing two forward passes
650
+ prompt_embeds = torch.cat([negative_prompt_embeds, prompt_embeds])
651
+
652
+ return prompt_embeds
653
+
654
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.run_safety_checker
655
+ def run_safety_checker(self, image, device, dtype):
656
+ if self.safety_checker is None:
657
+ has_nsfw_concept = None
658
+ else:
659
+ if torch.is_tensor(image):
660
+ feature_extractor_input = self.image_processor.postprocess(image, output_type="pil")
661
+ else:
662
+ feature_extractor_input = self.image_processor.numpy_to_pil(image)
663
+ safety_checker_input = self.feature_extractor(feature_extractor_input, return_tensors="pt").to(device)
664
+ image, has_nsfw_concept = self.safety_checker(
665
+ images=image, clip_input=safety_checker_input.pixel_values.to(dtype)
666
+ )
667
+ return image, has_nsfw_concept
668
+
669
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion.StableDiffusionPipeline.prepare_extra_step_kwargs
670
+ def prepare_extra_step_kwargs(self, generator, eta):
671
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
672
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
673
+ # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
674
+ # and should be between [0, 1]
675
+
676
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
677
+ extra_step_kwargs = {}
678
+ if accepts_eta:
679
+ extra_step_kwargs["eta"] = eta
680
+
681
+ # check if the scheduler accepts generator
682
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
683
+ if accepts_generator:
684
+ extra_step_kwargs["generator"] = generator
685
+ return extra_step_kwargs
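+ # Note (descriptive, assuming stock diffusers schedulers): DDIMScheduler.step accepts both
+ # `eta` and `generator`, so both are forwarded here, while schedulers whose `step` signature
+ # has neither (e.g. PNDMScheduler) simply receive an empty kwargs dict.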
686
+
687
+ def check_inputs(
688
+ self,
689
+ prompt,
690
+ height,
691
+ width,
692
+ strength,
693
+ callback_steps,
694
+ negative_prompt=None,
695
+ prompt_embeds=None,
696
+ negative_prompt_embeds=None,
697
+ ):
698
+ if strength < 0 or strength > 1:
699
+ raise ValueError(f"The value of strength should in [0.0, 1.0] but is {strength}")
700
+
701
+ if height % 8 != 0 or width % 8 != 0:
702
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
703
+
704
+ if (callback_steps is None) or (
705
+ callback_steps is not None and (not isinstance(callback_steps, int) or callback_steps <= 0)
706
+ ):
707
+ raise ValueError(
708
+ f"`callback_steps` has to be a positive integer but is {callback_steps} of type"
709
+ f" {type(callback_steps)}."
710
+ )
711
+
712
+ if prompt is not None and prompt_embeds is not None:
713
+ raise ValueError(
714
+ f"Cannot forward both `prompt`: {prompt} and `prompt_embeds`: {prompt_embeds}. Please make sure to"
715
+ " only forward one of the two."
716
+ )
717
+ elif prompt is None and prompt_embeds is None:
718
+ raise ValueError(
719
+ "Provide either `prompt` or `prompt_embeds`. Cannot leave both `prompt` and `prompt_embeds` undefined."
720
+ )
721
+ elif prompt is not None and (not isinstance(prompt, str) and not isinstance(prompt, list)):
722
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
723
+
724
+ if negative_prompt is not None and negative_prompt_embeds is not None:
725
+ raise ValueError(
726
+ f"Cannot forward both `negative_prompt`: {negative_prompt} and `negative_prompt_embeds`:"
727
+ f" {negative_prompt_embeds}. Please make sure to only forward one of the two."
728
+ )
729
+
730
+ if prompt_embeds is not None and negative_prompt_embeds is not None:
731
+ if prompt_embeds.shape != negative_prompt_embeds.shape:
732
+ raise ValueError(
733
+ "`prompt_embeds` and `negative_prompt_embeds` must have the same shape when passed directly, but"
734
+ f" got: `prompt_embeds` {prompt_embeds.shape} != `negative_prompt_embeds`"
735
+ f" {negative_prompt_embeds.shape}."
736
+ )
737
+
738
+ def prepare_latents(
739
+ self,
740
+ batch_size,
741
+ num_channels_latents,
742
+ height,
743
+ width,
744
+ dtype,
745
+ device,
746
+ generator,
747
+ latents=None,
748
+ image=None,
749
+ timestep=None,
750
+ is_strength_max=True,
751
+ return_noise=False,
752
+ return_image_latents=False,
753
+ ):
754
+ shape = (batch_size, num_channels_latents, height // self.vae_scale_factor, width // self.vae_scale_factor)
755
+ if isinstance(generator, list) and len(generator) != batch_size:
756
+ raise ValueError(
757
+ f"You have passed a list of generators of length {len(generator)}, but requested an effective batch"
758
+ f" size of {batch_size}. Make sure the batch size matches the length of the generators."
759
+ )
760
+
761
+ if (image is None or timestep is None) and not is_strength_max:
762
+ raise ValueError(
763
+ "Since strength < 1. initial latents are to be initialised as a combination of Image + Noise."
764
+ "However, either the image or the noise timestep has not been provided."
765
+ )
766
+
767
+ if return_image_latents or (latents is None and not is_strength_max):
768
+ image = image.to(device=device, dtype=dtype)
769
+ image_latents = self._encode_vae_image(image=image, generator=generator)
770
+
771
+ if latents is None:
772
+ noise = randn_tensor(shape, generator=generator, device=device, dtype=dtype)
773
+ # if strength is 1. then initialise the latents to noise, else initial to image + noise
774
+ latents = noise if is_strength_max else self.scheduler.add_noise(image_latents, noise, timestep)
775
+ # if pure noise then scale the initial latents by the Scheduler's init sigma
776
+ latents = latents * self.scheduler.init_noise_sigma if is_strength_max else latents
777
+ else:
778
+ noise = latents.to(device)
779
+ latents = noise * self.scheduler.init_noise_sigma
780
+
781
+ outputs = (latents,)
782
+
783
+ if return_noise:
784
+ outputs += (noise,)
785
+
786
+ if return_image_latents:
787
+ outputs += (image_latents,)
788
+
789
+ return outputs
790
+
791
+ def _encode_vae_image(self, image: torch.Tensor, generator: torch.Generator):
792
+ if isinstance(generator, list):
793
+ image_latents = [
794
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator=generator[i])
795
+ for i in range(image.shape[0])
796
+ ]
797
+ image_latents = torch.cat(image_latents, dim=0)
798
+ else:
799
+ image_latents = self.vae.encode(image).latent_dist.sample(generator=generator)
800
+
801
+ image_latents = self.vae.config.scaling_factor * image_latents
802
+
803
+ return image_latents
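+ # Note (descriptive): the sampled posterior latents are scaled by `vae.config.scaling_factor`
+ # (0.18215 for the Stable Diffusion v1.x VAE) so the latents consumed by the UNet have roughly unit variance.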
804
+
805
+ def prepare_mask_latents(
806
+ self, mask, masked_image, batch_size, height, width, dtype, device, generator, do_classifier_free_guidance
807
+ ):
808
+ # resize the mask to latents shape as we concatenate the mask to the latents
809
+ # we do that before converting to dtype to avoid breaking in case we're using cpu_offload
810
+ # and half precision
811
+ mask = torch.nn.functional.interpolate(
812
+ mask, size=(height // self.vae_scale_factor, width // self.vae_scale_factor)
813
+ )
814
+ mask = mask.to(device=device, dtype=dtype)
815
+
816
+ masked_image = masked_image.to(device=device, dtype=dtype)
817
+ masked_image_latents = self._encode_vae_image(masked_image, generator=generator)
818
+
819
+ # duplicate mask and masked_image_latents for each generation per prompt, using mps friendly method
820
+ if mask.shape[0] < batch_size:
821
+ if not batch_size % mask.shape[0] == 0:
822
+ raise ValueError(
823
+ "The passed mask and the required batch size don't match. Masks are supposed to be duplicated to"
824
+ f" a total batch size of {batch_size}, but {mask.shape[0]} masks were passed. Make sure the number"
825
+ " of masks that you pass is divisible by the total requested batch size."
826
+ )
827
+ mask = mask.repeat(batch_size // mask.shape[0], 1, 1, 1)
828
+ if masked_image_latents.shape[0] < batch_size:
829
+ if not batch_size % masked_image_latents.shape[0] == 0:
830
+ raise ValueError(
831
+ "The passed images and the required batch size don't match. Images are supposed to be duplicated"
832
+ f" to a total batch size of {batch_size}, but {masked_image_latents.shape[0]} images were passed."
833
+ " Make sure the number of images that you pass is divisible by the total requested batch size."
834
+ )
835
+ masked_image_latents = masked_image_latents.repeat(batch_size // masked_image_latents.shape[0], 1, 1, 1)
836
+
837
+ mask = torch.cat([mask] * 2) if do_classifier_free_guidance else mask
838
+ masked_image_latents = (
839
+ torch.cat([masked_image_latents] * 2) if do_classifier_free_guidance else masked_image_latents
840
+ )
841
+
842
+ # aligning device to prevent device errors when concatenating it with the latent model input
843
+ masked_image_latents = masked_image_latents.to(device=device, dtype=dtype)
844
+ return mask, masked_image_latents
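+ # Shape sketch (assuming 512x512 inputs and the SD v1.5 VAE, vae_scale_factor=8): `mask` becomes
+ # (B, 1, 64, 64) and `masked_image_latents` (B, 4, 64, 64); under classifier-free guidance both are
+ # duplicated along the batch dimension so they can be concatenated with the doubled latent input.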
845
+
846
+ # Copied from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion_img2img.StableDiffusionImg2ImgPipeline.get_timesteps
847
+ def get_timesteps(self, num_inference_steps, strength, device):
848
+ # get the original timestep using init_timestep
849
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
850
+
851
+ t_start = max(num_inference_steps - init_timestep, 0)
852
+ timesteps = self.scheduler.timesteps[t_start * self.scheduler.order :]
853
+
854
+ return timesteps, num_inference_steps - t_start
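+ # Worked example (sketch): with num_inference_steps=50 and strength=0.6, init_timestep = 30 and
+ # t_start = 20, so only the last 30 scheduler timesteps are used; strength=1.0 keeps the full schedule.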
855
+
856
+ @torch.no_grad()
857
+ def __call__(
858
+ self,
859
+ prompt: Union[str, List[str]] = None,
860
+ image: Union[torch.FloatTensor, PIL.Image.Image] = None,
861
+ default_mask_image: Union[torch.FloatTensor, PIL.Image.Image] = None,
862
+ height: Optional[int] = None,
863
+ width: Optional[int] = None,
864
+ strength: float = 1.0,
865
+ num_inference_steps: int = 50,
866
+ guidance_scale: float = 7.5,
867
+ negative_prompt: Optional[Union[str, List[str]]] = None,
868
+ num_images_per_prompt: Optional[int] = 1,
869
+ eta: float = 0.0,
870
+ generator: Optional[Union[torch.Generator, List[torch.Generator]]] = None,
871
+ latents: Optional[torch.FloatTensor] = None,
872
+ prompt_embeds: Optional[torch.FloatTensor] = None,
873
+ negative_prompt_embeds: Optional[torch.FloatTensor] = None,
874
+ output_type: Optional[str] = "pil",
875
+ return_dict: bool = True,
876
+ callback: Optional[Callable[[int, int, torch.FloatTensor], None]] = None,
877
+ callback_steps: int = 1,
878
+ cross_attention_kwargs: Optional[Dict[str, Any]] = None,
879
+ use_adaptive_mask: bool = True,
880
+ enforce_full_mask_ratio: float = 0.5,
881
+ human_detection_thres: float = 0.008,
882
+ visualization_save_dir: str = None,
883
+ ):
884
+ r"""
885
+ The call function to the pipeline for generation.
886
+
887
+ Args:
888
+ prompt (`str` or `List[str]`, *optional*):
889
+ The prompt or prompts to guide image generation. If not defined, you need to pass `prompt_embeds`.
890
+ image (`PIL.Image.Image`):
891
+ `Image` or tensor representing an image batch to be inpainted (which parts of the image to be masked
892
+ out with `default_mask_image` and repainted according to `prompt`).
893
+ default_mask_image (`PIL.Image.Image`):
894
+ `Image` or tensor representing an image batch to mask `image`. White pixels in the mask are repainted
895
+ while black pixels are preserved. If `default_mask_image` is a PIL image, it is converted to a single channel
896
+ (luminance) before use. If it's a tensor, it should contain one color channel (L) instead of 3, so the
897
+ expected shape would be `(B, H, W, 1)`.
898
+ height (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
899
+ The height in pixels of the generated image.
900
+ width (`int`, *optional*, defaults to `self.unet.config.sample_size * self.vae_scale_factor`):
901
+ The width in pixels of the generated image.
902
+ strength (`float`, *optional*, defaults to 1.0):
903
+ Indicates extent to transform the reference `image`. Must be between 0 and 1. `image` is used as a
904
+ starting point and more noise is added the higher the `strength`. The number of denoising steps depends
905
+ on the amount of noise initially added. When `strength` is 1, added noise is maximum and the denoising
906
+ process runs for the full number of iterations specified in `num_inference_steps`. A value of 1
907
+ essentially ignores `image`.
908
+ num_inference_steps (`int`, *optional*, defaults to 50):
909
+ The number of denoising steps. More denoising steps usually lead to a higher quality image at the
910
+ expense of slower inference. This parameter is modulated by `strength`.
911
+ guidance_scale (`float`, *optional*, defaults to 7.5):
912
+ A higher guidance scale value encourages the model to generate images closely linked to the text
913
+ `prompt` at the expense of lower image quality. Guidance scale is enabled when `guidance_scale > 1`.
914
+ negative_prompt (`str` or `List[str]`, *optional*):
915
+ The prompt or prompts to guide what to not include in image generation. If not defined, you need to
916
+ pass `negative_prompt_embeds` instead. Ignored when not using guidance (`guidance_scale < 1`).
917
+ num_images_per_prompt (`int`, *optional*, defaults to 1):
918
+ The number of images to generate per prompt.
919
+ eta (`float`, *optional*, defaults to 0.0):
920
+ Corresponds to parameter eta (η) from the [DDIM](https://huggingface.co/papers/2010.02502) paper. Only applies
921
+ to the [`~schedulers.DDIMScheduler`], and is ignored in other schedulers.
922
+ generator (`torch.Generator` or `List[torch.Generator]`, *optional*):
923
+ A [`torch.Generator`](https://pytorch.org/docs/stable/generated/torch.Generator.html) to make
924
+ generation deterministic.
925
+ latents (`torch.FloatTensor`, *optional*):
926
+ Pre-generated noisy latents sampled from a Gaussian distribution, to be used as inputs for image
927
+ generation. Can be used to tweak the same generation with different prompts. If not provided, a latents
928
+ tensor is generated by sampling using the supplied random `generator`.
929
+ prompt_embeds (`torch.FloatTensor`, *optional*):
930
+ Pre-generated text embeddings. Can be used to easily tweak text inputs (prompt weighting). If not
931
+ provided, text embeddings are generated from the `prompt` input argument.
932
+ negative_prompt_embeds (`torch.FloatTensor`, *optional*):
933
+ Pre-generated negative text embeddings. Can be used to easily tweak text inputs (prompt weighting). If
934
+ not provided, `negative_prompt_embeds` are generated from the `negative_prompt` input argument.
935
+ output_type (`str`, *optional*, defaults to `"pil"`):
936
+ The output format of the generated image. Choose between `PIL.Image` or `np.array`.
937
+ return_dict (`bool`, *optional*, defaults to `True`):
938
+ Whether or not to return a [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] instead of a
939
+ plain tuple.
940
+ callback (`Callable`, *optional*):
941
+ A function called every `callback_steps` steps during inference. It is invoked with the
942
+ following arguments: `callback(step: int, timestep: int, latents: torch.FloatTensor)`.
943
+ callback_steps (`int`, *optional*, defaults to 1):
944
+ The frequency at which the `callback` function is called. If not specified, the callback is called at
945
+ every step.
946
+ cross_attention_kwargs (`dict`, *optional*):
947
+ A kwargs dictionary that if specified is passed along to the [`AttentionProcessor`] as defined in
948
+ [`self.processor`](https://github.com/huggingface/diffusers/blob/main/src/diffusers/models/attention_processor.py).
949
+
950
+ Examples:
951
+
952
+ ```py
953
+ >>> import PIL
954
+ >>> import requests
955
+ >>> import torch
956
+ >>> from io import BytesIO
957
+
958
+ >>> from diffusers import AdaptiveMaskInpaintPipeline
959
+
960
+
961
+ >>> def download_image(url):
962
+ ... response = requests.get(url)
963
+ ... return PIL.Image.open(BytesIO(response.content)).convert("RGB")
964
+
965
+
966
+ >>> img_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo.png"
967
+ >>> mask_url = "https://raw.githubusercontent.com/CompVis/latent-diffusion/main/data/inpainting_examples/overture-creations-5sI6fQgYIuo_mask.png"
968
+
969
+ >>> init_image = download_image(img_url).resize((512, 512))
970
+ >>> default_mask_image = download_image(mask_url).resize((512, 512))
971
+
972
+ >>> pipe = AdaptiveMaskInpaintPipeline.from_pretrained(
973
+ ... "runwayml/stable-diffusion-inpainting", torch_dtype=torch.float16
974
+ ... )
975
+ >>> pipe = pipe.to("cuda")
976
+
977
+ >>> prompt = "Face of a yellow cat, high resolution, sitting on a park bench"
978
+ >>> image = pipe(prompt=prompt, image=init_image, default_mask_image=default_mask_image).images[0]
979
+ ```
980
+
981
+ Returns:
982
+ [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] or `tuple`:
983
+ If `return_dict` is `True`, [`~pipelines.stable_diffusion.StableDiffusionPipelineOutput`] is returned,
984
+ otherwise a `tuple` is returned where the first element is a list with the generated images and the
985
+ second element is a list of `bool`s indicating whether the corresponding generated image contains
986
+ "not-safe-for-work" (nsfw) content.
987
+ """
988
+ # 0. Default height and width to unet
989
+ width, height = image.size
990
+ # height = height or self.unet.config.sample_size * self.vae_scale_factor
991
+ # width = width or self.unet.config.sample_size * self.vae_scale_factor
992
+
993
+ # 1. Check inputs
994
+ self.check_inputs(
995
+ prompt,
996
+ height,
997
+ width,
998
+ strength,
999
+ callback_steps,
1000
+ negative_prompt,
1001
+ prompt_embeds,
1002
+ negative_prompt_embeds,
1003
+ )
1004
+
1005
+ # 2. Define call parameters
1006
+ if prompt is not None and isinstance(prompt, str):
1007
+ batch_size = 1
1008
+ elif prompt is not None and isinstance(prompt, list):
1009
+ batch_size = len(prompt)
1010
+ else:
1011
+ batch_size = prompt_embeds.shape[0]
1012
+
1013
+ device = self._execution_device
1014
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
1015
+ # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
1016
+ # corresponds to doing no classifier free guidance.
1017
+ do_classifier_free_guidance = guidance_scale > 1.0
1018
+
1019
+ # 3. Encode input prompt
1020
+ text_encoder_lora_scale = (
1021
+ cross_attention_kwargs.get("scale", None) if cross_attention_kwargs is not None else None
1022
+ )
1023
+ prompt_embeds = self._encode_prompt(
1024
+ prompt,
1025
+ device,
1026
+ num_images_per_prompt,
1027
+ do_classifier_free_guidance,
1028
+ negative_prompt,
1029
+ prompt_embeds=prompt_embeds,
1030
+ negative_prompt_embeds=negative_prompt_embeds,
1031
+ lora_scale=text_encoder_lora_scale,
1032
+ )
1033
+
1034
+ # 4. set timesteps
1035
+ self.scheduler.set_timesteps(num_inference_steps, device=device)
1036
+ timesteps, num_inference_steps = self.get_timesteps(
1037
+ num_inference_steps=num_inference_steps, strength=strength, device=device
1038
+ )
1039
+ # check that number of inference steps is not < 1 - as this doesn't make sense
1040
+ if num_inference_steps < 1:
1041
+ raise ValueError(
1042
+ f"After adjusting the num_inference_steps by strength parameter: {strength}, the number of pipeline"
1043
+ f"steps is {num_inference_steps} which is < 1 and not appropriate for this pipeline."
1044
+ )
1045
+ # at which timestep to set the initial noise (n.b. 50% if strength is 0.5)
1046
+ latent_timestep = timesteps[:1].repeat(batch_size * num_images_per_prompt)
1047
+ # create a boolean to check if the strength is set to 1. if so then initialise the latents with pure noise
1048
+ is_strength_max = strength == 1.0
1049
+
1050
+ # 5. Preprocess mask and image (will be used later, once again)
1051
+ mask, masked_image, init_image = prepare_mask_and_masked_image(
1052
+ image, default_mask_image, height, width, return_image=True
1053
+ )
1054
+ default_mask_image_np = np.array(default_mask_image).astype(np.uint8) / 255
1055
+ mask_condition = mask.clone()
1056
+
1057
+ # 6. Prepare latent variables
1058
+ num_channels_latents = self.vae.config.latent_channels
1059
+ num_channels_unet = self.unet.config.in_channels
1060
+ return_image_latents = num_channels_unet == 4
1061
+
1062
+ latents_outputs = self.prepare_latents(
1063
+ batch_size * num_images_per_prompt,
1064
+ num_channels_latents,
1065
+ height,
1066
+ width,
1067
+ prompt_embeds.dtype,
1068
+ device,
1069
+ generator,
1070
+ latents,
1071
+ image=init_image,
1072
+ timestep=latent_timestep,
1073
+ is_strength_max=is_strength_max,
1074
+ return_noise=True,
1075
+ return_image_latents=return_image_latents,
1076
+ )
1077
+
1078
+ if return_image_latents:
1079
+ latents, noise, image_latents = latents_outputs
1080
+ else:
1081
+ latents, noise = latents_outputs
1082
+
1083
+ # 7. Prepare mask latent variables
1084
+ mask, masked_image_latents = self.prepare_mask_latents(
1085
+ mask,
1086
+ masked_image,
1087
+ batch_size * num_images_per_prompt,
1088
+ height,
1089
+ width,
1090
+ prompt_embeds.dtype,
1091
+ device,
1092
+ generator,
1093
+ do_classifier_free_guidance,
1094
+ )
1095
+
1096
+ # 8. Check that sizes of mask, masked image and latents match
1097
+ if num_channels_unet == 9:
1098
+ # default case for runwayml/stable-diffusion-inpainting
1099
+ num_channels_mask = mask.shape[1]
1100
+ num_channels_masked_image = masked_image_latents.shape[1]
1101
+ if num_channels_latents + num_channels_mask + num_channels_masked_image != self.unet.config.in_channels:
1102
+ raise ValueError(
1103
+ f"Incorrect configuration settings! The config of `pipeline.unet`: {self.unet.config} expects"
1104
+ f" {self.unet.config.in_channels} but received `num_channels_latents`: {num_channels_latents} +"
1105
+ f" `num_channels_mask`: {num_channels_mask} + `num_channels_masked_image`: {num_channels_masked_image}"
1106
+ f" = {num_channels_latents + num_channels_masked_image + num_channels_mask}. Please verify the config of"
1107
+ " `pipeline.unet` or your `default_mask_image` or `image` input."
1108
+ )
1109
+ elif num_channels_unet != 4:
1110
+ raise ValueError(
1111
+ f"The unet {self.unet.__class__} should have either 4 or 9 input channels, not {self.unet.config.in_channels}."
1112
+ )
1113
+
1114
+ # 9. Prepare extra step kwargs. TODO: Logic should ideally just be moved out of the pipeline
1115
+ extra_step_kwargs = self.prepare_extra_step_kwargs(generator, eta)
1116
+
1117
+ # 10. Denoising loop
1118
+ mask_image_np = default_mask_image_np
1119
+ num_warmup_steps = len(timesteps) - num_inference_steps * self.scheduler.order
1120
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
1121
+ for i, t in enumerate(timesteps):
1122
+ # expand the latents if we are doing classifier free guidance
1123
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
1124
+
1125
+ # concat latents, mask, masked_image_latents in the channel dimension
1126
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
1127
+
1128
+ if num_channels_unet == 9:
1129
+ latent_model_input = torch.cat([latent_model_input, mask, masked_image_latents], dim=1)
1130
+ else:
1131
+ raise NotImplementedError
1132
+
1133
+ # predict the noise residual
1134
+ noise_pred = self.unet(
1135
+ latent_model_input,
1136
+ t,
1137
+ encoder_hidden_states=prompt_embeds,
1138
+ cross_attention_kwargs=cross_attention_kwargs,
1139
+ return_dict=False,
1140
+ )[0]
1141
+
1142
+ # perform guidance
1143
+ if do_classifier_free_guidance:
1144
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
1145
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
1146
+
1147
+ # compute the previous noisy sample x_t -> x_t-1 & predicted original sample x_0
1148
+ outputs = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs, return_dict=True)
1149
+ latents = outputs["prev_sample"] # x_t-1
1150
+ pred_orig_latents = outputs["pred_original_sample"] # x_0
1151
+
1152
+ # run segmentation
1153
+ if use_adaptive_mask:
1154
+ if enforce_full_mask_ratio > 0.0:
1155
+ use_default_mask = t < self.scheduler.config.num_train_timesteps * enforce_full_mask_ratio
1156
+ elif enforce_full_mask_ratio == 0.0:
1157
+ use_default_mask = False
1158
+ else:
1159
+ raise NotImplementedError
1160
+
1161
+ pred_orig_image = self.decode_to_npuint8_image(pred_orig_latents)
1162
+ dilate_num = self.adaptive_mask_settings.dilate_scheduler(i)
1163
+ do_adapt_mask = self.adaptive_mask_settings.provoke_scheduler(i)
1164
+ if do_adapt_mask:
1165
+ mask, masked_image_latents, mask_image_np, vis_np = self.adapt_mask(
1166
+ init_image,
1167
+ pred_orig_image,
1168
+ default_mask_image_np,
1169
+ dilate_num=dilate_num,
1170
+ use_default_mask=use_default_mask,
1171
+ height=height,
1172
+ width=width,
1173
+ batch_size=batch_size,
1174
+ num_images_per_prompt=num_images_per_prompt,
1175
+ prompt_embeds=prompt_embeds,
1176
+ device=device,
1177
+ generator=generator,
1178
+ do_classifier_free_guidance=do_classifier_free_guidance,
1179
+ i=i,
1180
+ human_detection_thres=human_detection_thres,
1181
+ mask_image_np=mask_image_np,
1182
+ )
1183
+
1184
+ if self.adaptive_mask_model.use_visualizer:
1185
+ import matplotlib.pyplot as plt
1186
+
1187
+ # mask_image_new_colormap = np.clip(0.6 + (1.0 - mask_image_np), a_min=0.0, a_max=1.0) * 255
1188
+
1189
+ os.makedirs(visualization_save_dir, exist_ok=True)
1190
+
1191
+ # Image.fromarray(mask_image_new_colormap).convert("L").save(f"{visualization_save_dir}/masks/{i:05}.png")
1192
+ plt.axis("off")
1193
+ plt.subplot(1, 2, 1)
1194
+ plt.imshow(mask_image_np)
1195
+ plt.subplot(1, 2, 2)
1196
+ plt.imshow(pred_orig_image)
1197
+ plt.savefig(f"{visualization_save_dir}/{i:05}.png", bbox_inches="tight")
1198
+ plt.close("all")
1199
+
1200
+ if num_channels_unet == 4:
1201
+ init_latents_proper = image_latents[:1]
1202
+ init_mask = mask[:1]
1203
+
1204
+ if i < len(timesteps) - 1:
1205
+ noise_timestep = timesteps[i + 1]
1206
+ init_latents_proper = self.scheduler.add_noise(
1207
+ init_latents_proper, noise, torch.tensor([noise_timestep])
1208
+ )
1209
+
1210
+ latents = (1 - init_mask) * init_latents_proper + init_mask * latents
1211
+
1212
+ # call the callback, if provided
1213
+ if i == len(timesteps) - 1 or ((i + 1) > num_warmup_steps and (i + 1) % self.scheduler.order == 0):
1214
+ progress_bar.update()
1215
+ if callback is not None and i % callback_steps == 0:
1216
+ callback(i, t, latents)
1217
+
1218
+ if not output_type == "latent":
1219
+ condition_kwargs = {}
1220
+ if isinstance(self.vae, AsymmetricAutoencoderKL):
1221
+ init_image = init_image.to(device=device, dtype=masked_image_latents.dtype)
1222
+ init_image_condition = init_image.clone()
1223
+ init_image = self._encode_vae_image(init_image, generator=generator)
1224
+ mask_condition = mask_condition.to(device=device, dtype=masked_image_latents.dtype)
1225
+ condition_kwargs = {"image": init_image_condition, "mask": mask_condition}
1226
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **condition_kwargs)[0]
1227
+ image, has_nsfw_concept = self.run_safety_checker(image, device, prompt_embeds.dtype)
1228
+ else:
1229
+ image = latents
1230
+ has_nsfw_concept = None
1231
+
1232
+ if has_nsfw_concept is None:
1233
+ do_denormalize = [True] * image.shape[0]
1234
+ else:
1235
+ do_denormalize = [not has_nsfw for has_nsfw in has_nsfw_concept]
1236
+
1237
+ image = self.image_processor.postprocess(image, output_type=output_type, do_denormalize=do_denormalize)
1238
+
1239
+ # Offload last model to CPU
1240
+ if hasattr(self, "final_offload_hook") and self.final_offload_hook is not None:
1241
+ self.final_offload_hook.offload()
1242
+
1243
+ if self.adaptive_mask_model.use_visualizer:
1244
+ generate_video_from_imgs(images_save_directory=visualization_save_dir, fps=10, delete_dir=True)
1245
+
1246
+ if not return_dict:
1247
+ return (image, has_nsfw_concept)
1248
+
1249
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=has_nsfw_concept)
1250
+
1251
+ def decode_to_npuint8_image(self, latents):
1252
+ image = self.vae.decode(latents / self.vae.config.scaling_factor, return_dict=False, **{})[
1253
+ 0
1254
+ ] # torch, float32, -1.~1.
1255
+ image = self.image_processor.postprocess(image, output_type="pt", do_denormalize=[True] * image.shape[0])
1256
+ image = (image.squeeze().permute(1, 2, 0).detach().cpu().numpy() * 255).astype(np.uint8) # np, uint8, 0~255
1257
+ return image
1258
+
1259
+ def register_adaptive_mask_settings(self):
1260
+ from easydict import EasyDict
1261
+
1262
+ num_steps = 50
1263
+
1264
+ step_num = int(num_steps * 0.1)
1265
+ final_step_num = num_steps - step_num * 7
1266
+ # adaptive mask settings
1267
+ self.adaptive_mask_settings = EasyDict(
1268
+ dilate_scheduler=MaskDilateScheduler(
1269
+ max_dilate_num=20,
1270
+ num_inference_steps=num_steps,
1271
+ schedule=[20] * step_num
1272
+ + [10] * step_num
1273
+ + [5] * step_num
1274
+ + [4] * step_num
1275
+ + [3] * step_num
1276
+ + [2] * step_num
1277
+ + [1] * step_num
1278
+ + [0] * final_step_num,
1279
+ ),
1280
+ dilate_kernel=np.ones((3, 3), dtype=np.uint8),
1281
+ provoke_scheduler=ProvokeScheduler(
1282
+ num_inference_steps=num_steps,
1283
+ schedule=list(range(2, 10 + 1, 2)) + list(range(12, 40 + 1, 2)) + [45],
1284
+ is_zero_indexing=False,
1285
+ ),
1286
+ )
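+ # For the default num_steps=50 above: step_num=5 and final_step_num=15, so the dilation schedule is
+ # [20]*5 + [10]*5 + [5]*5 + [4]*5 + [3]*5 + [2]*5 + [1]*5 + [0]*15 (one entry per denoising step,
+ # shrinking the dilation as sampling converges), and the mask is re-predicted at the 1-indexed steps
+ # 2, 4, ..., 10, 12, 14, ..., 40 and 45.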
1287
+
1288
+ def register_adaptive_mask_model(self):
1289
+ # declare segmentation model used for mask adaptation
1290
+ use_visualizer = True
1291
+ # assert not use_visualizer, \
1292
+ # """
1293
+ # If you plan to 'use_visualizer', USE WITH CAUTION.
1294
+ # It creates a directory of images and masks, which is used for merging into a video.
1295
+ # The procedure involves deleting the directory of images, which means that
1296
+ # if you set the directory wrong you can have other important files blown away.
1297
+ # """
1298
+
1299
+ self.adaptive_mask_model = PointRendPredictor(
1300
+ # pointrend_thres=0.2,
1301
+ pointrend_thres=0.9,
1302
+ device="cuda" if torch.cuda.is_available() else "cpu",
1303
+ use_visualizer=use_visualizer,
1304
+ config_pth="pointrend_rcnn_R_50_FPN_3x_coco.yaml",
1305
+ weights_pth="model_final_edd263.pkl",
1306
+ )
1307
+
1308
+ def adapt_mask(self, init_image, pred_orig_image, default_mask_image, dilate_num, use_default_mask, **kwargs):
1309
+ ## predict mask to use for adaptation
1310
+ adapt_output = self.adaptive_mask_model(pred_orig_image) # vis can be None if 'use_visualizer' is False
1311
+ mask = adapt_output["mask"]
1312
+ vis = adapt_output["vis"]
1313
+
1314
+ ## if the mask is empty or too small, fall back to default_mask_image; otherwise dilate it and intersect with default_mask_image
1315
+ if use_default_mask or mask.sum() < 512 * 512 * kwargs["human_detection_thres"]: # 0.005
1316
+ # set mask as default mask
1317
+ mask = default_mask_image # HxW
1318
+
1319
+ else:
1320
+ ## timestep-adaptive mask
1321
+ mask = cv2.dilate(
1322
+ mask, self.adaptive_mask_settings.dilate_kernel, iterations=dilate_num
1323
+ ) # dilate_kernel: np.ones((3,3), np.uint8)
1324
+ mask = np.logical_and(mask, default_mask_image) # HxW
1325
+
1326
+ ## prepare mask as pt tensor format
1327
+ mask = torch.tensor(mask, dtype=torch.float32).to(kwargs["device"])[None, None] # 1 x 1 x H x W
1328
+ mask, masked_image = prepare_mask_and_masked_image(
1329
+ init_image.to(kwargs["device"]), mask, kwargs["height"], kwargs["width"], return_image=False
1330
+ )
1331
+
1332
+ mask_image_np = mask.clone().squeeze().detach().cpu().numpy()
1333
+
1334
+ mask, masked_image_latents = self.prepare_mask_latents(
1335
+ mask,
1336
+ masked_image,
1337
+ kwargs["batch_size"] * kwargs["num_images_per_prompt"],
1338
+ kwargs["height"],
1339
+ kwargs["width"],
1340
+ kwargs["prompt_embeds"].dtype,
1341
+ kwargs["device"],
1342
+ kwargs["generator"],
1343
+ kwargs["do_classifier_free_guidance"],
1344
+ )
1345
+
1346
+ return mask, masked_image_latents, mask_image_np, vis
1347
+
1348
+
1349
+ def seg2bbox(seg_mask: np.ndarray):
1350
+ nonzero_i, nonzero_j = seg_mask.nonzero()
1351
+ min_i, max_i = nonzero_i.min(), nonzero_i.max()
1352
+ min_j, max_j = nonzero_j.min(), nonzero_j.max()
1353
+
1354
+ return np.array([min_j, min_i, max_j + 1, max_i + 1])
1355
+
1356
+
1357
+ def merge_bbox(bboxes: list):
1358
+ assert len(bboxes) > 0
1359
+
1360
+ all_bboxes = np.stack(bboxes, axis=0) # shape: N_bbox X 4
1361
+ merged_bbox = np.zeros_like(all_bboxes[0]) # shape: 4,
1362
+
1363
+ merged_bbox[0] = all_bboxes[:, 0].min()
1364
+ merged_bbox[1] = all_bboxes[:, 1].min()
1365
+ merged_bbox[2] = all_bboxes[:, 2].max()
1366
+ merged_bbox[3] = all_bboxes[:, 3].max()
1367
+
1368
+ return merged_bbox
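+ # Example of the bbox convention used above (values are illustrative): for a mask whose nonzero
+ # pixels span rows 10..20 and columns 30..40, seg2bbox returns np.array([30, 10, 41, 21]) in
+ # (x_min, y_min, x_max_exclusive, y_max_exclusive) order, and merge_bbox takes the element-wise
+ # union of several such boxes.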
1369
+
1370
+
1371
+ class PointRendPredictor:
1372
+ def __init__(
1373
+ self,
1374
+ cat_id_to_focus=0,
1375
+ pointrend_thres=0.9,
1376
+ device="cuda",
1377
+ use_visualizer=False,
1378
+ merge_mode="merge",
1379
+ config_pth=None,
1380
+ weights_pth=None,
1381
+ ):
1382
+ super().__init__()
1383
+
1384
+ # category id to focus (default: 0, which is human)
1385
+ self.cat_id_to_focus = cat_id_to_focus
1386
+
1387
+ # setup coco metadata
1388
+ self.coco_metadata = MetadataCatalog.get("coco_2017_val")
1389
+ self.cfg = get_cfg()
1390
+
1391
+ # get segmentation model config
1392
+ point_rend.add_pointrend_config(self.cfg) # --> Add PointRend-specific config
1393
+ self.cfg.merge_from_file(config_pth)
1394
+ self.cfg.MODEL.WEIGHTS = weights_pth
1395
+ self.cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = pointrend_thres
1396
+ self.cfg.MODEL.DEVICE = device
1397
+
1398
+ # get segmentation model
1399
+ self.pointrend_seg_model = DefaultPredictor(self.cfg)
1400
+
1401
+ # settings for visualizer
1402
+ self.use_visualizer = use_visualizer
1403
+
1404
+ # mask-merge mode
1405
+ assert merge_mode in ["merge", "max-confidence"], f"'merge_mode': {merge_mode} not implemented."
1406
+ self.merge_mode = merge_mode
1407
+
1408
+ def merge_mask(self, masks, scores=None):
1409
+ if self.merge_mode == "merge":
1410
+ mask = np.any(masks, axis=0)
1411
+ elif self.merge_mode == "max-confidence":
1412
+ mask = masks[np.argmax(scores)]
1413
+ return mask
1414
+
1415
+ def vis_seg_on_img(self, image, mask):
1416
+ if isinstance(mask, np.ndarray):
1417
+ mask = torch.tensor(mask)
1418
+ v = Visualizer(image, self.coco_metadata, scale=0.5, instance_mode=ColorMode.IMAGE_BW)
1419
+ instances = Instances(image_size=image.shape[:2], pred_masks=mask if len(mask.shape) == 3 else mask[None])
1420
+ vis = v.draw_instance_predictions(instances.to("cpu")).get_image()
1421
+ return vis
1422
+
1423
+ def __call__(self, image):
1424
+ # run segmentation
1425
+ outputs = self.pointrend_seg_model(image)
1426
+ instances = outputs["instances"]
1427
+
1428
+ # merge instances for the category-id to focus
1429
+ is_class = instances.pred_classes == self.cat_id_to_focus
1430
+ masks = instances.pred_masks[is_class]
1431
+ masks = masks.detach().cpu().numpy() # [N, img_size, img_size]
1432
+ mask = self.merge_mask(masks, scores=instances.scores[is_class])
1433
+
1434
+ return {
1435
+ "asset_mask": None,
1436
+ "mask": mask.astype(np.uint8),
1437
+ "vis": self.vis_seg_on_img(image, mask) if self.use_visualizer else None,
1438
+ }
1439
+
1440
+
1441
+ class MaskDilateScheduler:
1442
+ def __init__(self, max_dilate_num=15, num_inference_steps=50, schedule=None):
1443
+ super().__init__()
1444
+ self.max_dilate_num = max_dilate_num
1445
+ self.schedule = [num_inference_steps - i for i in range(num_inference_steps)] if schedule is None else schedule
1446
+ assert len(self.schedule) == num_inference_steps
1447
+
1448
+ def __call__(self, i):
1449
+ return min(self.max_dilate_num, self.schedule[i])
1450
+
1451
+
1452
+ class ProvokeScheduler:
1453
+ def __init__(self, num_inference_steps=50, schedule=None, is_zero_indexing=False):
1454
+ super().__init__()
1455
+ if len(schedule) > 0:
1456
+ if is_zero_indexing:
1457
+ assert max(schedule) <= num_inference_steps - 1
1458
+ else:
1459
+ assert max(schedule) <= num_inference_steps
1460
+
1461
+ # register as self
1462
+ self.is_zero_indexing = is_zero_indexing
1463
+ self.schedule = schedule
1464
+
1465
+ def __call__(self, i):
1466
+ if self.is_zero_indexing:
1467
+ return i in self.schedule
1468
+ else:
1469
+ return i + 1 in self.schedule
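A minimal sketch of how these two schedulers could be queried inside a denoising loop; the step count and the provoke schedule [10, 20, 30] are illustrative values, not pipeline defaults (note that ProvokeScheduler expects an explicit schedule list).

num_steps = 50
dilate_sched = MaskDilateScheduler(max_dilate_num=15, num_inference_steps=num_steps)
provoke_sched = ProvokeScheduler(num_inference_steps=num_steps, schedule=[10, 20, 30], is_zero_indexing=False)

for i in range(num_steps):
    kernel_size = dilate_sched(i)   # starts at 15 (capped by max_dilate_num) and decays to 1
    do_provoke = provoke_sched(i)   # True only when the 1-indexed step is 10, 20 or 30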
exp_code/1_benchmark/diffusers-WanS2V/examples/community/bit_diffusion.py ADDED
@@ -0,0 +1,264 @@
1
+ from typing import Optional, Tuple, Union
2
+
3
+ import torch
4
+ from einops import rearrange, reduce
5
+
6
+ from diffusers import DDIMScheduler, DDPMScheduler, DiffusionPipeline, ImagePipelineOutput, UNet2DConditionModel
7
+ from diffusers.schedulers.scheduling_ddim import DDIMSchedulerOutput
8
+ from diffusers.schedulers.scheduling_ddpm import DDPMSchedulerOutput
9
+
10
+
11
+ BITS = 8
12
+
13
+
14
+ # convert to bit representations and back taken from https://github.com/lucidrains/bit-diffusion/blob/main/bit_diffusion/bit_diffusion.py
15
+ def decimal_to_bits(x, bits=BITS):
16
+ """expects image tensor ranging from 0 to 1, outputs bit tensor ranging from -1 to 1"""
17
+ device = x.device
18
+
19
+ x = (x * 255).int().clamp(0, 255)
20
+
21
+ mask = 2 ** torch.arange(bits - 1, -1, -1, device=device)
22
+ mask = rearrange(mask, "d -> d 1 1")
23
+ x = rearrange(x, "b c h w -> b c 1 h w")
24
+
25
+ bits = ((x & mask) != 0).float()
26
+ bits = rearrange(bits, "b c d h w -> b (c d) h w")
27
+ bits = bits * 2 - 1
28
+ return bits
29
+
30
+
31
+ def bits_to_decimal(x, bits=BITS):
32
+ """expects bits from -1 to 1, outputs image tensor from 0 to 1"""
33
+ device = x.device
34
+
35
+ x = (x > 0).int()
36
+ mask = 2 ** torch.arange(bits - 1, -1, -1, device=device, dtype=torch.int32)
37
+
38
+ mask = rearrange(mask, "d -> d 1 1")
39
+ x = rearrange(x, "b (c d) h w -> b c d h w", d=8)
40
+ dec = reduce(x * mask, "b c d h w -> b c h w", "sum")
41
+ return (dec / 255).clamp(0.0, 1.0)
42
+
43
+
44
+ # modified scheduler step functions for clamping the predicted x_0 between -bit_scale and +bit_scale
45
+ def ddim_bit_scheduler_step(
46
+ self,
47
+ model_output: torch.Tensor,
48
+ timestep: int,
49
+ sample: torch.Tensor,
50
+ eta: float = 0.0,
51
+ use_clipped_model_output: bool = True,
52
+ generator=None,
53
+ return_dict: bool = True,
54
+ ) -> Union[DDIMSchedulerOutput, Tuple]:
55
+ """
56
+ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
57
+ process from the learned model outputs (most often the predicted noise).
58
+ Args:
59
+ model_output (`torch.Tensor`): direct output from learned diffusion model.
60
+ timestep (`int`): current discrete timestep in the diffusion chain.
61
+ sample (`torch.Tensor`):
62
+ current instance of sample being created by diffusion process.
63
+ eta (`float`): weight of noise for added noise in diffusion step.
64
+ use_clipped_model_output (`bool`): TODO
65
+ generator: random number generator.
66
+ return_dict (`bool`): option for returning tuple rather than DDIMSchedulerOutput class
67
+ Returns:
68
+ [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] or `tuple`:
69
+ [`~schedulers.scheduling_utils.DDIMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
70
+ returning a tuple, the first element is the sample tensor.
71
+ """
72
+ if self.num_inference_steps is None:
73
+ raise ValueError(
74
+ "Number of inference steps is 'None', you need to run 'set_timesteps' after creating the scheduler"
75
+ )
76
+
77
+ # See formulas (12) and (16) of DDIM paper https://huggingface.co/papers/2010.02502
78
+ # Ideally, read DDIM paper in-detail understanding
79
+
80
+ # Notation (<variable name> -> <name in paper>
81
+ # - pred_noise_t -> e_theta(x_t, t)
82
+ # - pred_original_sample -> f_theta(x_t, t) or x_0
83
+ # - std_dev_t -> sigma_t
84
+ # - eta -> η
85
+ # - pred_sample_direction -> "direction pointing to x_t"
86
+ # - pred_prev_sample -> "x_t-1"
87
+
88
+ # 1. get previous step value (=t-1)
89
+ prev_timestep = timestep - self.config.num_train_timesteps // self.num_inference_steps
90
+
91
+ # 2. compute alphas, betas
92
+ alpha_prod_t = self.alphas_cumprod[timestep]
93
+ alpha_prod_t_prev = self.alphas_cumprod[prev_timestep] if prev_timestep >= 0 else self.final_alpha_cumprod
94
+
95
+ beta_prod_t = 1 - alpha_prod_t
96
+
97
+ # 3. compute predicted original sample from predicted noise also called
98
+ # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
99
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
100
+
101
+ # 4. Clip "predicted x_0"
102
+ scale = self.bit_scale
103
+ if self.config.clip_sample:
104
+ pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
105
+
106
+ # 5. compute variance: "sigma_t(η)" -> see formula (16)
107
+ # σ_t = sqrt((1 − α_t−1)/(1 − α_t)) * sqrt(1 − α_t/α_t−1)
108
+ variance = self._get_variance(timestep, prev_timestep)
109
+ std_dev_t = eta * variance ** (0.5)
110
+
111
+ if use_clipped_model_output:
112
+ # the model_output is always re-derived from the clipped x_0 in Glide
113
+ model_output = (sample - alpha_prod_t ** (0.5) * pred_original_sample) / beta_prod_t ** (0.5)
114
+
115
+ # 6. compute "direction pointing to x_t" of formula (12) from https://huggingface.co/papers/2010.02502
116
+ pred_sample_direction = (1 - alpha_prod_t_prev - std_dev_t**2) ** (0.5) * model_output
117
+
118
+ # 7. compute x_t without "random noise" of formula (12) from https://huggingface.co/papers/2010.02502
119
+ prev_sample = alpha_prod_t_prev ** (0.5) * pred_original_sample + pred_sample_direction
120
+
121
+ if eta > 0:
122
+ # randn_like does not support generator https://github.com/pytorch/pytorch/issues/27072
123
+ device = model_output.device if torch.is_tensor(model_output) else "cpu"
124
+ noise = torch.randn(model_output.shape, dtype=model_output.dtype, generator=generator).to(device)
125
+ variance = self._get_variance(timestep, prev_timestep) ** (0.5) * eta * noise
126
+
127
+ prev_sample = prev_sample + variance
128
+
129
+ if not return_dict:
130
+ return (prev_sample,)
131
+
132
+ return DDIMSchedulerOutput(prev_sample=prev_sample, pred_original_sample=pred_original_sample)
133
+
134
+
135
+ def ddpm_bit_scheduler_step(
136
+ self,
137
+ model_output: torch.Tensor,
138
+ timestep: int,
139
+ sample: torch.Tensor,
140
+ prediction_type="epsilon",
141
+ generator=None,
142
+ return_dict: bool = True,
143
+ ) -> Union[DDPMSchedulerOutput, Tuple]:
144
+ """
145
+ Predict the sample at the previous timestep by reversing the SDE. Core function to propagate the diffusion
146
+ process from the learned model outputs (most often the predicted noise).
147
+ Args:
148
+ model_output (`torch.Tensor`): direct output from learned diffusion model.
149
+ timestep (`int`): current discrete timestep in the diffusion chain.
150
+ sample (`torch.Tensor`):
151
+ current instance of sample being created by diffusion process.
152
+ prediction_type (`str`, default `epsilon`):
153
+ indicates whether the model predicts the noise (epsilon), or the samples (`sample`).
154
+ generator: random number generator.
155
+ return_dict (`bool`): option for returning tuple rather than DDPMSchedulerOutput class
156
+ Returns:
157
+ [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] or `tuple`:
158
+ [`~schedulers.scheduling_utils.DDPMSchedulerOutput`] if `return_dict` is True, otherwise a `tuple`. When
159
+ returning a tuple, the first element is the sample tensor.
160
+ """
161
+ t = timestep
162
+
163
+ if model_output.shape[1] == sample.shape[1] * 2 and self.variance_type in ["learned", "learned_range"]:
164
+ model_output, predicted_variance = torch.split(model_output, sample.shape[1], dim=1)
165
+ else:
166
+ predicted_variance = None
167
+
168
+ # 1. compute alphas, betas
169
+ alpha_prod_t = self.alphas_cumprod[t]
170
+ alpha_prod_t_prev = self.alphas_cumprod[t - 1] if t > 0 else self.one
171
+ beta_prod_t = 1 - alpha_prod_t
172
+ beta_prod_t_prev = 1 - alpha_prod_t_prev
173
+
174
+ # 2. compute predicted original sample from predicted noise also called
175
+ # "predicted x_0" of formula (15) from https://huggingface.co/papers/2006.11239
176
+ if prediction_type == "epsilon":
177
+ pred_original_sample = (sample - beta_prod_t ** (0.5) * model_output) / alpha_prod_t ** (0.5)
178
+ elif prediction_type == "sample":
179
+ pred_original_sample = model_output
180
+ else:
181
+ raise ValueError(f"Unsupported prediction_type {prediction_type}.")
182
+
183
+ # 3. Clip "predicted x_0"
184
+ scale = self.bit_scale
185
+ if self.config.clip_sample:
186
+ pred_original_sample = torch.clamp(pred_original_sample, -scale, scale)
187
+
188
+ # 4. Compute coefficients for pred_original_sample x_0 and current sample x_t
189
+ # See formula (7) from https://huggingface.co/papers/2006.11239
190
+ pred_original_sample_coeff = (alpha_prod_t_prev ** (0.5) * self.betas[t]) / beta_prod_t
191
+ current_sample_coeff = self.alphas[t] ** (0.5) * beta_prod_t_prev / beta_prod_t
192
+
193
+ # 5. Compute predicted previous sample µ_t
194
+ # See formula (7) from https://huggingface.co/papers/2006.11239
195
+ pred_prev_sample = pred_original_sample_coeff * pred_original_sample + current_sample_coeff * sample
196
+
197
+ # 6. Add noise
198
+ variance = 0
199
+ if t > 0:
200
+ noise = torch.randn(
201
+ model_output.size(), dtype=model_output.dtype, layout=model_output.layout, generator=generator
202
+ ).to(model_output.device)
203
+ variance = (self._get_variance(t, predicted_variance=predicted_variance) ** 0.5) * noise
204
+
205
+ pred_prev_sample = pred_prev_sample + variance
206
+
207
+ if not return_dict:
208
+ return (pred_prev_sample,)
209
+
210
+ return DDPMSchedulerOutput(prev_sample=pred_prev_sample, pred_original_sample=pred_original_sample)
211
+
212
+
213
+ class BitDiffusion(DiffusionPipeline):
214
+ def __init__(
215
+ self,
216
+ unet: UNet2DConditionModel,
217
+ scheduler: Union[DDIMScheduler, DDPMScheduler],
218
+ bit_scale: Optional[float] = 1.0,
219
+ ):
220
+ super().__init__()
221
+ self.bit_scale = bit_scale
222
+ self.scheduler.step = (
223
+ ddim_bit_scheduler_step if isinstance(scheduler, DDIMScheduler) else ddpm_bit_scheduler_step
224
+ )
225
+
226
+ self.register_modules(unet=unet, scheduler=scheduler)
227
+
228
+ @torch.no_grad()
229
+ def __call__(
230
+ self,
231
+ height: Optional[int] = 256,
232
+ width: Optional[int] = 256,
233
+ num_inference_steps: Optional[int] = 50,
234
+ generator: Optional[torch.Generator] = None,
235
+ batch_size: Optional[int] = 1,
236
+ output_type: Optional[str] = "pil",
237
+ return_dict: bool = True,
238
+ **kwargs,
239
+ ) -> Union[Tuple, ImagePipelineOutput]:
240
+ latents = torch.randn(
241
+ (batch_size, self.unet.config.in_channels, height, width),
242
+ generator=generator,
243
+ )
244
+ latents = decimal_to_bits(latents) * self.bit_scale
245
+ latents = latents.to(self.device)
246
+
247
+ self.scheduler.set_timesteps(num_inference_steps)
248
+
249
+ for t in self.progress_bar(self.scheduler.timesteps):
250
+ # predict the noise residual
251
+ noise_pred = self.unet(latents, t).sample
252
+
253
+ # compute the previous noisy sample x_t -> x_t-1
254
+ latents = self.scheduler.step(noise_pred, t, latents).prev_sample
255
+
256
+ image = bits_to_decimal(latents)
+ # convert to channel-last numpy so that `numpy_to_pil` and `ImagePipelineOutput` receive the expected format
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
257
+
258
+ if output_type == "pil":
259
+ image = self.numpy_to_pil(image)
260
+
261
+ if not return_dict:
262
+ return (image,)
263
+
264
+ return ImagePipelineOutput(images=image)
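A hedged construction sketch for this pipeline: the checkpoint path below is a placeholder, and the UNet is assumed to have been trained on bit-encoded targets (i.e. 3 * BITS input/output channels); adjust the scheduler and call arguments to match how the model was actually trained.

import torch
from diffusers import DDIMScheduler, UNet2DConditionModel

unet = UNet2DConditionModel.from_pretrained("path/to/bit-diffusion-unet")  # placeholder checkpoint
scheduler = DDIMScheduler(num_train_timesteps=1000)

pipe = BitDiffusion(unet=unet, scheduler=scheduler, bit_scale=1.0).to("cuda")
images = pipe(height=256, width=256, num_inference_steps=50, generator=torch.manual_seed(0)).images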
exp_code/1_benchmark/diffusers-WanS2V/examples/community/checkpoint_merger.py ADDED
@@ -0,0 +1,288 @@
1
+ import glob
2
+ import os
3
+ from typing import Dict, List, Union
4
+
5
+ import safetensors.torch
6
+ import torch
7
+ from huggingface_hub import snapshot_download
8
+ from huggingface_hub.utils import validate_hf_hub_args
9
+
10
+ from diffusers import DiffusionPipeline, __version__
11
+ from diffusers.schedulers.scheduling_utils import SCHEDULER_CONFIG_NAME
12
+ from diffusers.utils import CONFIG_NAME, ONNX_WEIGHTS_NAME, WEIGHTS_NAME
13
+
14
+
15
+ class CheckpointMergerPipeline(DiffusionPipeline):
16
+ """
17
+ A class that supports merging diffusion models based on the discussion here:
18
+ https://github.com/huggingface/diffusers/issues/877
19
+
20
+ Example usage:
21
+
22
+ pipe = DiffusionPipeline.from_pretrained("CompVis/stable-diffusion-v1-4", custom_pipeline="checkpoint_merger.py")
23
+
24
+ merged_pipe = pipe.merge(["CompVis/stable-diffusion-v1-4","prompthero/openjourney"], interp = 'inv_sigmoid', alpha = 0.8, force = True)
25
+
26
+ merged_pipe.to('cuda')
27
+
28
+ prompt = "An astronaut riding a unicycle on Mars"
29
+
30
+ results = merged_pipe(prompt)
31
+
32
+ ## For more details, see the docstring for the merge method.
33
+
34
+ """
35
+
36
+ def __init__(self):
37
+ self.register_to_config()
38
+ super().__init__()
39
+
40
+ def _compare_model_configs(self, dict0, dict1):
41
+ if dict0 == dict1:
42
+ return True
43
+ else:
44
+ config0, meta_keys0 = self._remove_meta_keys(dict0)
45
+ config1, meta_keys1 = self._remove_meta_keys(dict1)
46
+ if config0 == config1:
47
+ print(f"Warning!: Mismatch in keys {meta_keys0} and {meta_keys1}.")
48
+ return True
49
+ return False
50
+
51
+ def _remove_meta_keys(self, config_dict: Dict):
52
+ meta_keys = []
53
+ temp_dict = config_dict.copy()
54
+ for key in config_dict.keys():
55
+ if key.startswith("_"):
56
+ temp_dict.pop(key)
57
+ meta_keys.append(key)
58
+ return (temp_dict, meta_keys)
59
+
60
+ @torch.no_grad()
61
+ @validate_hf_hub_args
62
+ def merge(self, pretrained_model_name_or_path_list: List[Union[str, os.PathLike]], **kwargs):
63
+ """
64
+ Returns a new pipeline object of the class 'DiffusionPipeline' with the merged checkpoints(weights) of the models passed
65
+ in the argument 'pretrained_model_name_or_path_list' as a list.
66
+
67
+ Parameters:
68
+ -----------
69
+ pretrained_model_name_or_path_list : A list of valid pretrained model names in the HuggingFace hub or paths to locally stored models in the HuggingFace format.
70
+
71
+ **kwargs:
72
+ Supports all the default DiffusionPipeline.get_config_dict kwargs viz..
73
+
74
+ cache_dir, force_download, proxies, local_files_only, token, revision, torch_dtype, device_map.
75
+
76
+ alpha - The interpolation parameter. Ranges from 0 to 1. It is the weight given to the second model in the
+ default weighted-sum interpolation, so an alpha of 0.8 means the first model's checkpoints contribute far
+ less to the final result than they would with an alpha of 0.2.
78
+
79
+ interp - The interpolation method to use for the merging. Supports "sigmoid", "inv_sigmoid", "add_diff" and None.
80
+ Passing None uses the default interpolation which is weighted sum interpolation. For merging three checkpoints, only "add_diff" is supported.
81
+
82
+ force - Whether to ignore mismatch in model_config.json for the current models. Defaults to False.
83
+
84
+ variant - which variant of a pretrained model to load, e.g. "fp16" (None)
85
+
86
+ """
87
+ # Default kwargs from DiffusionPipeline
88
+ cache_dir = kwargs.pop("cache_dir", None)
89
+ force_download = kwargs.pop("force_download", False)
90
+ proxies = kwargs.pop("proxies", None)
91
+ local_files_only = kwargs.pop("local_files_only", False)
92
+ token = kwargs.pop("token", None)
93
+ variant = kwargs.pop("variant", None)
94
+ revision = kwargs.pop("revision", None)
95
+ torch_dtype = kwargs.pop("torch_dtype", torch.float32)
96
+ device_map = kwargs.pop("device_map", None)
97
+
98
+ if not isinstance(torch_dtype, torch.dtype):
99
+ torch_dtype = torch.float32
100
+ print(f"Passed `torch_dtype` {torch_dtype} is not a `torch.dtype`. Defaulting to `torch.float32`.")
101
+
102
+ alpha = kwargs.pop("alpha", 0.5)
103
+ interp = kwargs.pop("interp", None)
104
+
105
+ print("Received list", pretrained_model_name_or_path_list)
106
+ print(f"Combining with alpha={alpha}, interpolation mode={interp}")
107
+
108
+ checkpoint_count = len(pretrained_model_name_or_path_list)
109
+ # Ignore result from model_index_json comparison of the two checkpoints
110
+ force = kwargs.pop("force", False)
111
+
112
+ # If less than 2 checkpoints, nothing to merge. If more than 3, not supported for now.
113
+ if checkpoint_count > 3 or checkpoint_count < 2:
114
+ raise ValueError(
115
+ "Received incorrect number of checkpoints to merge. Ensure that either 2 or 3 checkpoints are being"
116
+ " passed."
117
+ )
118
+
119
+ print("Received the right number of checkpoints")
120
+ # chkpt0, chkpt1 = pretrained_model_name_or_path_list[0:2]
121
+ # chkpt2 = pretrained_model_name_or_path_list[2] if checkpoint_count == 3 else None
122
+
123
+ # Validate that the checkpoints can be merged
124
+ # Step 1: Load the model config and compare the checkpoints. We'll compare the model_index.json first while ignoring the keys starting with '_'
125
+ config_dicts = []
126
+ for pretrained_model_name_or_path in pretrained_model_name_or_path_list:
127
+ config_dict = DiffusionPipeline.load_config(
128
+ pretrained_model_name_or_path,
129
+ cache_dir=cache_dir,
130
+ force_download=force_download,
131
+ proxies=proxies,
132
+ local_files_only=local_files_only,
133
+ token=token,
134
+ revision=revision,
135
+ )
136
+ config_dicts.append(config_dict)
137
+
138
+ comparison_result = True
139
+ for idx in range(1, len(config_dicts)):
140
+ comparison_result &= self._compare_model_configs(config_dicts[idx - 1], config_dicts[idx])
141
+ if not force and comparison_result is False:
142
+ raise ValueError("Incompatible checkpoints. Please check model_index.json for the models.")
143
+ print("Compatible model_index.json files found")
144
+ # Step 2: Basic Validation has succeeded. Let's download the models and save them into our local files.
145
+ cached_folders = []
146
+ for pretrained_model_name_or_path, config_dict in zip(pretrained_model_name_or_path_list, config_dicts):
147
+ folder_names = [k for k in config_dict.keys() if not k.startswith("_")]
148
+ allow_patterns = [os.path.join(k, "*") for k in folder_names]
149
+ allow_patterns += [
150
+ WEIGHTS_NAME,
151
+ SCHEDULER_CONFIG_NAME,
152
+ CONFIG_NAME,
153
+ ONNX_WEIGHTS_NAME,
154
+ DiffusionPipeline.config_name,
155
+ ]
156
+ requested_pipeline_class = config_dict.get("_class_name")
157
+ user_agent = {"diffusers": __version__, "pipeline_class": requested_pipeline_class}
158
+
159
+ cached_folder = (
160
+ pretrained_model_name_or_path
161
+ if os.path.isdir(pretrained_model_name_or_path)
162
+ else snapshot_download(
163
+ pretrained_model_name_or_path,
164
+ cache_dir=cache_dir,
165
+ proxies=proxies,
166
+ local_files_only=local_files_only,
167
+ token=token,
168
+ revision=revision,
169
+ allow_patterns=allow_patterns,
170
+ user_agent=user_agent,
171
+ )
172
+ )
173
+ print("Cached Folder", cached_folder)
174
+ cached_folders.append(cached_folder)
175
+
176
+ # Step 3:-
177
+ # Load the first checkpoint as a diffusion pipeline and modify its module state_dict in place
178
+ final_pipe = DiffusionPipeline.from_pretrained(
179
+ cached_folders[0],
180
+ torch_dtype=torch_dtype,
181
+ device_map=device_map,
182
+ variant=variant,
183
+ )
184
+ final_pipe.to(self.device)
185
+
186
+ checkpoint_path_2 = None
187
+ if len(cached_folders) > 2:
188
+ checkpoint_path_2 = os.path.join(cached_folders[2])
189
+
190
+ if interp == "sigmoid":
191
+ theta_func = CheckpointMergerPipeline.sigmoid
192
+ elif interp == "inv_sigmoid":
193
+ theta_func = CheckpointMergerPipeline.inv_sigmoid
194
+ elif interp == "add_diff":
195
+ theta_func = CheckpointMergerPipeline.add_difference
196
+ else:
197
+ theta_func = CheckpointMergerPipeline.weighted_sum
198
+
199
+ # Find each module's state dict.
200
+ for attr in final_pipe.config.keys():
201
+ if not attr.startswith("_"):
202
+ checkpoint_path_1 = os.path.join(cached_folders[1], attr)
203
+ if os.path.exists(checkpoint_path_1):
204
+ files = [
205
+ *glob.glob(os.path.join(checkpoint_path_1, "*.safetensors")),
206
+ *glob.glob(os.path.join(checkpoint_path_1, "*.bin")),
207
+ ]
208
+ checkpoint_path_1 = files[0] if len(files) > 0 else None
209
+ if len(cached_folders) < 3:
210
+ checkpoint_path_2 = None
211
+ else:
212
+ checkpoint_path_2 = os.path.join(cached_folders[2], attr)
213
+ if os.path.exists(checkpoint_path_2):
214
+ files = [
215
+ *glob.glob(os.path.join(checkpoint_path_2, "*.safetensors")),
216
+ *glob.glob(os.path.join(checkpoint_path_2, "*.bin")),
217
+ ]
218
+ checkpoint_path_2 = files[0] if len(files) > 0 else None
219
+ # For an attr if both checkpoint_path_1 and 2 are None, ignore.
220
+ # If at least one is present, deal with it according to interp method, of course only if the state_dict keys match.
221
+ if checkpoint_path_1 is None and checkpoint_path_2 is None:
222
+ print(f"Skipping {attr}: not present in 2nd or 3rd model")
223
+ continue
224
+ try:
225
+ module = getattr(final_pipe, attr)
226
+ if isinstance(module, bool): # ignore requires_safety_checker boolean
227
+ continue
228
+ theta_0 = getattr(module, "state_dict")
229
+ theta_0 = theta_0()
230
+
231
+ update_theta_0 = getattr(module, "load_state_dict")
232
+ theta_1 = (
233
+ safetensors.torch.load_file(checkpoint_path_1)
234
+ if (checkpoint_path_1.endswith(".safetensors"))
235
+ else torch.load(checkpoint_path_1, map_location="cpu")
236
+ )
237
+ theta_2 = None
238
+ if checkpoint_path_2:
239
+ theta_2 = (
240
+ safetensors.torch.load_file(checkpoint_path_2)
241
+ if (checkpoint_path_2.endswith(".safetensors"))
242
+ else torch.load(checkpoint_path_2, map_location="cpu")
243
+ )
244
+
245
+ if not theta_0.keys() == theta_1.keys():
246
+ print(f"Skipping {attr}: key mismatch")
247
+ continue
248
+ if theta_2 and not theta_1.keys() == theta_2.keys():
249
+ print(f"Skipping {attr}: key mismatch")
250
+ except Exception as e:
251
+ print(f"Skipping {attr} due to an unexpected error: {str(e)}")
252
+ continue
253
+ print(f"MERGING {attr}")
254
+
255
+ for key in theta_0.keys():
256
+ if theta_2:
257
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], theta_2[key], alpha)
258
+ else:
259
+ theta_0[key] = theta_func(theta_0[key], theta_1[key], None, alpha)
260
+
261
+ del theta_1
262
+ del theta_2
263
+ update_theta_0(theta_0)
264
+
265
+ del theta_0
266
+ return final_pipe
267
+
268
+ @staticmethod
269
+ def weighted_sum(theta0, theta1, theta2, alpha):
270
+ return ((1 - alpha) * theta0) + (alpha * theta1)
271
+
272
+ # Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
273
+ @staticmethod
274
+ def sigmoid(theta0, theta1, theta2, alpha):
275
+ alpha = alpha * alpha * (3 - (2 * alpha))
276
+ return theta0 + ((theta1 - theta0) * alpha)
277
+
278
+ # Inverse Smoothstep (https://en.wikipedia.org/wiki/Smoothstep)
279
+ @staticmethod
280
+ def inv_sigmoid(theta0, theta1, theta2, alpha):
281
+ import math
282
+
283
+ alpha = 0.5 - math.sin(math.asin(1.0 - 2.0 * alpha) / 3.0)
284
+ return theta0 + ((theta1 - theta0) * alpha)
285
+
286
+ @staticmethod
287
+ def add_difference(theta0, theta1, theta2, alpha):
288
+ return theta0 + (theta1 - theta2) * (1.0 - alpha)
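To make the interpolation modes concrete, a toy sketch on plain tensors (arbitrary values): `weighted_sum` blends the two checkpoints with weight alpha on the second one, while `add_difference` adds the scaled difference of the second and third checkpoints onto the first.

import torch

theta0 = torch.tensor([1.0, 1.0])   # parameter from the first checkpoint
theta1 = torch.tensor([3.0, 5.0])   # parameter from the second checkpoint
theta2 = torch.tensor([2.0, 2.0])   # parameter from the third checkpoint (add_diff only)

CheckpointMergerPipeline.weighted_sum(theta0, theta1, None, 0.25)     # 0.75*theta0 + 0.25*theta1 -> tensor([1.5000, 2.0000])
CheckpointMergerPipeline.add_difference(theta0, theta1, theta2, 0.25) # theta0 + 0.75*(theta1 - theta2) -> tensor([1.7500, 3.2500])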
exp_code/1_benchmark/diffusers-WanS2V/examples/community/clip_guided_images_mixing_stable_diffusion.py ADDED
@@ -0,0 +1,445 @@
1
+ # -*- coding: utf-8 -*-
2
+ import inspect
3
+ from typing import Optional, Union
4
+
5
+ import numpy as np
6
+ import PIL.Image
7
+ import torch
8
+ from torch.nn import functional as F
9
+ from torchvision import transforms
10
+ from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
11
+
12
+ from diffusers import (
13
+ AutoencoderKL,
14
+ DDIMScheduler,
15
+ DPMSolverMultistepScheduler,
16
+ LMSDiscreteScheduler,
17
+ PNDMScheduler,
18
+ UNet2DConditionModel,
19
+ )
20
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
21
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
22
+ from diffusers.utils import PIL_INTERPOLATION
23
+ from diffusers.utils.torch_utils import randn_tensor
24
+
25
+
26
+ def preprocess(image, w, h):
27
+ if isinstance(image, torch.Tensor):
28
+ return image
29
+ elif isinstance(image, PIL.Image.Image):
30
+ image = [image]
31
+
32
+ if isinstance(image[0], PIL.Image.Image):
33
+ image = [np.array(i.resize((w, h), resample=PIL_INTERPOLATION["lanczos"]))[None, :] for i in image]
34
+ image = np.concatenate(image, axis=0)
35
+ image = np.array(image).astype(np.float32) / 255.0
36
+ image = image.transpose(0, 3, 1, 2)
37
+ image = 2.0 * image - 1.0
38
+ image = torch.from_numpy(image)
39
+ elif isinstance(image[0], torch.Tensor):
40
+ image = torch.cat(image, dim=0)
41
+ return image
42
+
43
+
44
+ def slerp(t, v0, v1, DOT_THRESHOLD=0.9995):
45
+ inputs_are_torch = False  # track whether the result must be converted back to a torch tensor
+ if not isinstance(v0, np.ndarray):
46
+ inputs_are_torch = True
47
+ input_device = v0.device
48
+ v0 = v0.cpu().numpy()
49
+ v1 = v1.cpu().numpy()
50
+
51
+ dot = np.sum(v0 * v1 / (np.linalg.norm(v0) * np.linalg.norm(v1)))
52
+ if np.abs(dot) > DOT_THRESHOLD:
53
+ v2 = (1 - t) * v0 + t * v1
54
+ else:
55
+ theta_0 = np.arccos(dot)
56
+ sin_theta_0 = np.sin(theta_0)
57
+ theta_t = theta_0 * t
58
+ sin_theta_t = np.sin(theta_t)
59
+ s0 = np.sin(theta_0 - theta_t) / sin_theta_0
60
+ s1 = sin_theta_t / sin_theta_0
61
+ v2 = s0 * v0 + s1 * v1
62
+
63
+ if inputs_are_torch:
64
+ v2 = torch.from_numpy(v2).to(input_device)
65
+
66
+ return v2
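A short sketch of `slerp` on random torch tensors (purely illustrative): the flattened tensors are treated as two vectors and interpolated along the great circle between them, with a plain linear interpolation fallback when they are nearly parallel.

import torch

v0 = torch.randn(4, 64)
v1 = torch.randn(4, 64)
mid = slerp(0.5, v0, v1)             # same shape and device as the inputs
same = slerp(0.5, v0, v0.clone())    # nearly parallel -> falls back to linear interpolation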
67
+
68
+
69
+ def spherical_dist_loss(x, y):
70
+ x = F.normalize(x, dim=-1)
71
+ y = F.normalize(y, dim=-1)
72
+ return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
73
+
74
+
75
+ def set_requires_grad(model, value):
76
+ for param in model.parameters():
77
+ param.requires_grad = value
78
+
79
+
80
+ class CLIPGuidedImagesMixingStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
81
+ def __init__(
82
+ self,
83
+ vae: AutoencoderKL,
84
+ text_encoder: CLIPTextModel,
85
+ clip_model: CLIPModel,
86
+ tokenizer: CLIPTokenizer,
87
+ unet: UNet2DConditionModel,
88
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
89
+ feature_extractor: CLIPImageProcessor,
90
+ coca_model=None,
91
+ coca_tokenizer=None,
92
+ coca_transform=None,
93
+ ):
94
+ super().__init__()
95
+ self.register_modules(
96
+ vae=vae,
97
+ text_encoder=text_encoder,
98
+ clip_model=clip_model,
99
+ tokenizer=tokenizer,
100
+ unet=unet,
101
+ scheduler=scheduler,
102
+ feature_extractor=feature_extractor,
103
+ coca_model=coca_model,
104
+ coca_tokenizer=coca_tokenizer,
105
+ coca_transform=coca_transform,
106
+ )
107
+ self.feature_extractor_size = (
108
+ feature_extractor.size
109
+ if isinstance(feature_extractor.size, int)
110
+ else feature_extractor.size["shortest_edge"]
111
+ )
112
+ self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
113
+ set_requires_grad(self.text_encoder, False)
114
+ set_requires_grad(self.clip_model, False)
115
+
116
+ def freeze_vae(self):
117
+ set_requires_grad(self.vae, False)
118
+
119
+ def unfreeze_vae(self):
120
+ set_requires_grad(self.vae, True)
121
+
122
+ def freeze_unet(self):
123
+ set_requires_grad(self.unet, False)
124
+
125
+ def unfreeze_unet(self):
126
+ set_requires_grad(self.unet, True)
127
+
128
+ def get_timesteps(self, num_inference_steps, strength, device):
129
+ # get the original timestep using init_timestep
130
+ init_timestep = min(int(num_inference_steps * strength), num_inference_steps)
131
+
132
+ t_start = max(num_inference_steps - init_timestep, 0)
133
+ timesteps = self.scheduler.timesteps[t_start:]
134
+
135
+ return timesteps, num_inference_steps - t_start
136
+
137
+ def prepare_latents(self, image, timestep, batch_size, dtype, device, generator=None):
138
+ if not isinstance(image, torch.Tensor):
139
+ raise ValueError(f"`image` has to be of type `torch.Tensor` but is {type(image)}")
140
+
141
+ image = image.to(device=device, dtype=dtype)
142
+
143
+ if isinstance(generator, list):
144
+ init_latents = [
145
+ self.vae.encode(image[i : i + 1]).latent_dist.sample(generator[i]) for i in range(batch_size)
146
+ ]
147
+ init_latents = torch.cat(init_latents, dim=0)
148
+ else:
149
+ init_latents = self.vae.encode(image).latent_dist.sample(generator)
150
+
151
+ # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
152
+ init_latents = 0.18215 * init_latents
153
+ init_latents = init_latents.repeat_interleave(batch_size, dim=0)
154
+
155
+ noise = randn_tensor(init_latents.shape, generator=generator, device=device, dtype=dtype)
156
+
157
+ # get latents
158
+ init_latents = self.scheduler.add_noise(init_latents, noise, timestep)
159
+ latents = init_latents
160
+
161
+ return latents
162
+
163
+ def get_image_description(self, image):
164
+ transformed_image = self.coca_transform(image).unsqueeze(0)
165
+ with torch.no_grad(), torch.cuda.amp.autocast():
166
+ generated = self.coca_model.generate(transformed_image.to(device=self.device, dtype=self.coca_model.dtype))
167
+ generated = self.coca_tokenizer.decode(generated[0].cpu().numpy())
168
+ return generated.split("<end_of_text>")[0].replace("<start_of_text>", "").rstrip(" .,")
169
+
170
+ def get_clip_image_embeddings(self, image, batch_size):
171
+ clip_image_input = self.feature_extractor.preprocess(image)
172
+ clip_image_features = torch.from_numpy(clip_image_input["pixel_values"][0]).unsqueeze(0).to(self.device).half()
173
+ image_embeddings_clip = self.clip_model.get_image_features(clip_image_features)
174
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
175
+ image_embeddings_clip = image_embeddings_clip.repeat_interleave(batch_size, dim=0)
176
+ return image_embeddings_clip
177
+
178
+ @torch.enable_grad()
179
+ def cond_fn(
180
+ self,
181
+ latents,
182
+ timestep,
183
+ index,
184
+ text_embeddings,
185
+ noise_pred_original,
186
+ original_image_embeddings_clip,
187
+ clip_guidance_scale,
188
+ ):
189
+ latents = latents.detach().requires_grad_()
190
+
191
+ latent_model_input = self.scheduler.scale_model_input(latents, timestep)
192
+
193
+ # predict the noise residual
194
+ noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
195
+
196
+ if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
197
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
198
+ beta_prod_t = 1 - alpha_prod_t
199
+ # compute predicted original sample from predicted noise also called
200
+ # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
201
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
202
+
203
+ fac = torch.sqrt(beta_prod_t)
204
+ sample = pred_original_sample * (fac) + latents * (1 - fac)
205
+ elif isinstance(self.scheduler, LMSDiscreteScheduler):
206
+ sigma = self.scheduler.sigmas[index]
207
+ sample = latents - sigma * noise_pred
208
+ else:
209
+ raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
210
+
211
+ # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
212
+ sample = 1 / 0.18215 * sample
213
+ image = self.vae.decode(sample).sample
214
+ image = (image / 2 + 0.5).clamp(0, 1)
215
+
216
+ image = transforms.Resize(self.feature_extractor_size)(image)
217
+ image = self.normalize(image).to(latents.dtype)
218
+
219
+ image_embeddings_clip = self.clip_model.get_image_features(image)
220
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
221
+
222
+ loss = spherical_dist_loss(image_embeddings_clip, original_image_embeddings_clip).mean() * clip_guidance_scale
223
+
224
+ grads = -torch.autograd.grad(loss, latents)[0]
225
+
226
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
227
+ latents = latents.detach() + grads * (sigma**2)
228
+ noise_pred = noise_pred_original
229
+ else:
230
+ noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
231
+ return noise_pred, latents
232
+
233
+ @torch.no_grad()
234
+ def __call__(
235
+ self,
236
+ style_image: Union[torch.Tensor, PIL.Image.Image],
237
+ content_image: Union[torch.Tensor, PIL.Image.Image],
238
+ style_prompt: Optional[str] = None,
239
+ content_prompt: Optional[str] = None,
240
+ height: Optional[int] = 512,
241
+ width: Optional[int] = 512,
242
+ noise_strength: float = 0.6,
243
+ num_inference_steps: Optional[int] = 50,
244
+ guidance_scale: Optional[float] = 7.5,
245
+ batch_size: Optional[int] = 1,
246
+ eta: float = 0.0,
247
+ clip_guidance_scale: Optional[float] = 100,
248
+ generator: Optional[torch.Generator] = None,
249
+ output_type: Optional[str] = "pil",
250
+ return_dict: bool = True,
251
+ slerp_latent_style_strength: float = 0.8,
252
+ slerp_prompt_style_strength: float = 0.1,
253
+ slerp_clip_image_style_strength: float = 0.1,
254
+ ):
255
+ if isinstance(generator, list) and len(generator) != batch_size:
256
+ raise ValueError(f"You have passed {batch_size} batch_size, but only {len(generator)} generators.")
257
+
258
+ if height % 8 != 0 or width % 8 != 0:
259
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
260
+
261
+ if isinstance(generator, torch.Generator) and batch_size > 1:
262
+ generator = [generator] + [None] * (batch_size - 1)
263
+
264
+ coca_is_none = [
265
+ ("model", self.coca_model is None),
266
+ ("tokenizer", self.coca_tokenizer is None),
267
+ ("transform", self.coca_transform is None),
268
+ ]
269
+ coca_is_none = [x[0] for x in coca_is_none if x[1]]
270
+ coca_is_none_str = ", ".join(coca_is_none)
271
+ # generate prompts with coca model if prompt is None
272
+ if content_prompt is None:
273
+ if len(coca_is_none):
274
+ raise ValueError(
275
+ f"Content prompt is None and CoCa [{coca_is_none_str}] is None."
276
+ f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
277
+ )
278
+ content_prompt = self.get_image_description(content_image)
279
+ if style_prompt is None:
280
+ if len(coca_is_none):
281
+ raise ValueError(
282
+ f"Style prompt is None and CoCa [{coca_is_none_str}] is None."
283
+ f" Set prompt or pass Coca [{coca_is_none_str}] to DiffusionPipeline."
284
+ )
285
+ style_prompt = self.get_image_description(style_image)
286
+
287
+ # get prompt text embeddings for content and style
288
+ content_text_input = self.tokenizer(
289
+ content_prompt,
290
+ padding="max_length",
291
+ max_length=self.tokenizer.model_max_length,
292
+ truncation=True,
293
+ return_tensors="pt",
294
+ )
295
+ content_text_embeddings = self.text_encoder(content_text_input.input_ids.to(self.device))[0]
296
+
297
+ style_text_input = self.tokenizer(
298
+ style_prompt,
299
+ padding="max_length",
300
+ max_length=self.tokenizer.model_max_length,
301
+ truncation=True,
302
+ return_tensors="pt",
303
+ )
304
+ style_text_embeddings = self.text_encoder(style_text_input.input_ids.to(self.device))[0]
305
+
306
+ text_embeddings = slerp(slerp_prompt_style_strength, content_text_embeddings, style_text_embeddings)
307
+
308
+ # duplicate text embeddings for each generation per prompt
309
+ text_embeddings = text_embeddings.repeat_interleave(batch_size, dim=0)
310
+
311
+ # set timesteps
312
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
313
+ extra_set_kwargs = {}
314
+ if accepts_offset:
315
+ extra_set_kwargs["offset"] = 1
316
+
317
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
318
+ # Some schedulers like PNDM have timesteps as arrays
319
+ # It's more optimized to move all timesteps to correct device beforehand
320
+ self.scheduler.timesteps.to(self.device)
321
+
322
+ timesteps, num_inference_steps = self.get_timesteps(num_inference_steps, noise_strength, self.device)
323
+ latent_timestep = timesteps[:1].repeat(batch_size)
324
+
325
+ # Preprocess image
326
+ preprocessed_content_image = preprocess(content_image, width, height)
327
+ content_latents = self.prepare_latents(
328
+ preprocessed_content_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
329
+ )
330
+
331
+ preprocessed_style_image = preprocess(style_image, width, height)
332
+ style_latents = self.prepare_latents(
333
+ preprocessed_style_image, latent_timestep, batch_size, text_embeddings.dtype, self.device, generator
334
+ )
335
+
336
+ latents = slerp(slerp_latent_style_strength, content_latents, style_latents)
337
+
338
+ if clip_guidance_scale > 0:
339
+ content_clip_image_embedding = self.get_clip_image_embeddings(content_image, batch_size)
340
+ style_clip_image_embedding = self.get_clip_image_embeddings(style_image, batch_size)
341
+ clip_image_embeddings = slerp(
342
+ slerp_clip_image_style_strength, content_clip_image_embedding, style_clip_image_embedding
343
+ )
344
+
345
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
346
+ # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
347
+ # corresponds to doing no classifier free guidance.
348
+ do_classifier_free_guidance = guidance_scale > 1.0
349
+ # get unconditional embeddings for classifier free guidance
350
+ if do_classifier_free_guidance:
351
+ max_length = content_text_input.input_ids.shape[-1]
352
+ uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
353
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
354
+ # duplicate unconditional embeddings for each generation per prompt
355
+ uncond_embeddings = uncond_embeddings.repeat_interleave(batch_size, dim=0)
356
+
357
+ # For classifier free guidance, we need to do two forward passes.
358
+ # Here we concatenate the unconditional and text embeddings into a single batch
359
+ # to avoid doing two forward passes
360
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
361
+
362
+ # get the initial random noise unless the user supplied it
363
+
364
+ # Unlike in other pipelines, latents need to be generated in the target device
365
+ # for 1-to-1 results reproducibility with the CompVis implementation.
366
+ # However this currently doesn't work in `mps`.
367
+ latents_shape = (batch_size, self.unet.config.in_channels, height // 8, width // 8)
368
+ latents_dtype = text_embeddings.dtype
369
+ if latents is None:
370
+ if self.device.type == "mps":
371
+ # randn does not work reproducibly on mps
372
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
373
+ self.device
374
+ )
375
+ else:
376
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
377
+ else:
378
+ if latents.shape != latents_shape:
379
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
380
+ latents = latents.to(self.device)
381
+
382
+ # scale the initial noise by the standard deviation required by the scheduler
383
+ latents = latents * self.scheduler.init_noise_sigma
384
+
385
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
386
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
387
+ # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
388
+ # and should be between [0, 1]
389
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
390
+ extra_step_kwargs = {}
391
+ if accepts_eta:
392
+ extra_step_kwargs["eta"] = eta
393
+
394
+ # check if the scheduler accepts generator
395
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
396
+ if accepts_generator:
397
+ extra_step_kwargs["generator"] = generator
398
+
399
+ with self.progress_bar(total=num_inference_steps) as progress_bar:
400
+ for i, t in enumerate(timesteps):
401
+ # expand the latents if we are doing classifier free guidance
402
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
403
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
404
+
405
+ # predict the noise residual
406
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
407
+
408
+ # perform classifier free guidance
409
+ if do_classifier_free_guidance:
410
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
411
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
412
+
413
+ # perform clip guidance
414
+ if clip_guidance_scale > 0:
415
+ text_embeddings_for_guidance = (
416
+ text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
417
+ )
418
+ noise_pred, latents = self.cond_fn(
419
+ latents,
420
+ t,
421
+ i,
422
+ text_embeddings_for_guidance,
423
+ noise_pred,
424
+ clip_image_embeddings,
425
+ clip_guidance_scale,
426
+ )
427
+
428
+ # compute the previous noisy sample x_t -> x_t-1
429
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
430
+
431
+ progress_bar.update()
432
+ # Hardcode 0.18215 because stable-diffusion-2-base does not have self.vae.config.scaling_factor
433
+ latents = 1 / 0.18215 * latents
434
+ image = self.vae.decode(latents).sample
435
+
436
+ image = (image / 2 + 0.5).clamp(0, 1)
437
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
438
+
439
+ if output_type == "pil":
440
+ image = self.numpy_to_pil(image)
441
+
442
+ if not return_dict:
443
+ return (image, None)
444
+
445
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
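A hedged usage sketch, assuming this file is loaded as a community pipeline under its filename; the model IDs, image paths and strengths below are placeholders. Passing both prompts explicitly avoids the need for the optional CoCa captioning components.

import torch
from PIL import Image
from transformers import CLIPImageProcessor, CLIPModel
from diffusers import DiffusionPipeline

clip_model = CLIPModel.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K", torch_dtype=torch.float16)
feature_extractor = CLIPImageProcessor.from_pretrained("laion/CLIP-ViT-B-32-laion2B-s34B-b79K")

pipe = DiffusionPipeline.from_pretrained(
    "CompVis/stable-diffusion-v1-4",
    custom_pipeline="clip_guided_images_mixing_stable_diffusion",
    clip_model=clip_model,
    feature_extractor=feature_extractor,
    torch_dtype=torch.float16,
).to("cuda")

out = pipe(
    style_image=Image.open("style.png").convert("RGB"),      # placeholder image paths
    content_image=Image.open("content.png").convert("RGB"),
    style_prompt="an oil painting",
    content_prompt="a portrait of a person",
    num_inference_steps=50,
    slerp_latent_style_strength=0.8,
    generator=torch.Generator("cuda").manual_seed(0),
)
image = out.images[0]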
exp_code/1_benchmark/diffusers-WanS2V/examples/community/clip_guided_stable_diffusion.py ADDED
@@ -0,0 +1,337 @@
1
+ import inspect
2
+ from typing import List, Optional, Union
3
+
4
+ import torch
5
+ from torch import nn
6
+ from torch.nn import functional as F
7
+ from torchvision import transforms
8
+ from transformers import CLIPImageProcessor, CLIPModel, CLIPTextModel, CLIPTokenizer
9
+
10
+ from diffusers import (
11
+ AutoencoderKL,
12
+ DDIMScheduler,
13
+ DPMSolverMultistepScheduler,
14
+ LMSDiscreteScheduler,
15
+ PNDMScheduler,
16
+ UNet2DConditionModel,
17
+ )
18
+ from diffusers.pipelines.pipeline_utils import DiffusionPipeline, StableDiffusionMixin
19
+ from diffusers.pipelines.stable_diffusion.pipeline_stable_diffusion import StableDiffusionPipelineOutput
20
+
21
+
22
+ class MakeCutouts(nn.Module):
23
+ def __init__(self, cut_size, cut_power=1.0):
24
+ super().__init__()
25
+
26
+ self.cut_size = cut_size
27
+ self.cut_power = cut_power
28
+
29
+ def forward(self, pixel_values, num_cutouts):
30
+ sideY, sideX = pixel_values.shape[2:4]
31
+ max_size = min(sideX, sideY)
32
+ min_size = min(sideX, sideY, self.cut_size)
33
+ cutouts = []
34
+ for _ in range(num_cutouts):
35
+ size = int(torch.rand([]) ** self.cut_power * (max_size - min_size) + min_size)
36
+ offsetx = torch.randint(0, sideX - size + 1, ())
37
+ offsety = torch.randint(0, sideY - size + 1, ())
38
+ cutout = pixel_values[:, :, offsety : offsety + size, offsetx : offsetx + size]
39
+ cutouts.append(F.adaptive_avg_pool2d(cutout, self.cut_size))
40
+ return torch.cat(cutouts)
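A quick sketch of what `MakeCutouts` returns (random input, illustrative sizes): `num_cutouts` random square crops per forward pass, each pooled down to the CLIP input resolution and concatenated along the batch dimension.

import torch

cutter = MakeCutouts(cut_size=224)
crops = cutter(torch.rand(2, 3, 512, 512), num_cutouts=4)
print(crops.shape)   # torch.Size([8, 3, 224, 224]) -- 4 cutouts, each keeping the batch of 2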
41
+
42
+
43
+ def spherical_dist_loss(x, y):
44
+ x = F.normalize(x, dim=-1)
45
+ y = F.normalize(y, dim=-1)
46
+ return (x - y).norm(dim=-1).div(2).arcsin().pow(2).mul(2)
47
+
48
+
49
+ def set_requires_grad(model, value):
50
+ for param in model.parameters():
51
+ param.requires_grad = value
52
+
53
+
54
+ class CLIPGuidedStableDiffusion(DiffusionPipeline, StableDiffusionMixin):
55
+ """CLIP guided stable diffusion based on the amazing repo by @crowsonkb and @Jack000
56
+ - https://github.com/Jack000/glid-3-xl
57
+ - https://github.dev/crowsonkb/k-diffusion
58
+ """
59
+
60
+ def __init__(
61
+ self,
62
+ vae: AutoencoderKL,
63
+ text_encoder: CLIPTextModel,
64
+ clip_model: CLIPModel,
65
+ tokenizer: CLIPTokenizer,
66
+ unet: UNet2DConditionModel,
67
+ scheduler: Union[PNDMScheduler, LMSDiscreteScheduler, DDIMScheduler, DPMSolverMultistepScheduler],
68
+ feature_extractor: CLIPImageProcessor,
69
+ ):
70
+ super().__init__()
71
+ self.register_modules(
72
+ vae=vae,
73
+ text_encoder=text_encoder,
74
+ clip_model=clip_model,
75
+ tokenizer=tokenizer,
76
+ unet=unet,
77
+ scheduler=scheduler,
78
+ feature_extractor=feature_extractor,
79
+ )
80
+
81
+ self.normalize = transforms.Normalize(mean=feature_extractor.image_mean, std=feature_extractor.image_std)
82
+ self.cut_out_size = (
83
+ feature_extractor.size
84
+ if isinstance(feature_extractor.size, int)
85
+ else feature_extractor.size["shortest_edge"]
86
+ )
87
+ self.make_cutouts = MakeCutouts(self.cut_out_size)
88
+
89
+ set_requires_grad(self.text_encoder, False)
90
+ set_requires_grad(self.clip_model, False)
91
+
92
+ def freeze_vae(self):
93
+ set_requires_grad(self.vae, False)
94
+
95
+ def unfreeze_vae(self):
96
+ set_requires_grad(self.vae, True)
97
+
98
+ def freeze_unet(self):
99
+ set_requires_grad(self.unet, False)
100
+
101
+ def unfreeze_unet(self):
102
+ set_requires_grad(self.unet, True)
103
+
104
+ @torch.enable_grad()
105
+ def cond_fn(
106
+ self,
107
+ latents,
108
+ timestep,
109
+ index,
110
+ text_embeddings,
111
+ noise_pred_original,
112
+ text_embeddings_clip,
113
+ clip_guidance_scale,
114
+ num_cutouts,
115
+ use_cutouts=True,
116
+ ):
117
+ latents = latents.detach().requires_grad_()
118
+
119
+ latent_model_input = self.scheduler.scale_model_input(latents, timestep)
120
+
121
+ # predict the noise residual
122
+ noise_pred = self.unet(latent_model_input, timestep, encoder_hidden_states=text_embeddings).sample
123
+
124
+ if isinstance(self.scheduler, (PNDMScheduler, DDIMScheduler, DPMSolverMultistepScheduler)):
125
+ alpha_prod_t = self.scheduler.alphas_cumprod[timestep]
126
+ beta_prod_t = 1 - alpha_prod_t
127
+ # compute predicted original sample from predicted noise also called
128
+ # "predicted x_0" of formula (12) from https://huggingface.co/papers/2010.02502
129
+ pred_original_sample = (latents - beta_prod_t ** (0.5) * noise_pred) / alpha_prod_t ** (0.5)
130
+
131
+ fac = torch.sqrt(beta_prod_t)
132
+ sample = pred_original_sample * (fac) + latents * (1 - fac)
133
+ elif isinstance(self.scheduler, LMSDiscreteScheduler):
134
+ sigma = self.scheduler.sigmas[index]
135
+ sample = latents - sigma * noise_pred
136
+ else:
137
+ raise ValueError(f"scheduler type {type(self.scheduler)} not supported")
138
+
139
+ sample = 1 / self.vae.config.scaling_factor * sample
140
+ image = self.vae.decode(sample).sample
141
+ image = (image / 2 + 0.5).clamp(0, 1)
142
+
143
+ if use_cutouts:
144
+ image = self.make_cutouts(image, num_cutouts)
145
+ else:
146
+ image = transforms.Resize(self.cut_out_size)(image)
147
+ image = self.normalize(image).to(latents.dtype)
148
+
149
+ image_embeddings_clip = self.clip_model.get_image_features(image)
150
+ image_embeddings_clip = image_embeddings_clip / image_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
151
+
152
+ if use_cutouts:
153
+ dists = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip)
154
+ dists = dists.view([num_cutouts, sample.shape[0], -1])
155
+ loss = dists.sum(2).mean(0).sum() * clip_guidance_scale
156
+ else:
157
+ loss = spherical_dist_loss(image_embeddings_clip, text_embeddings_clip).mean() * clip_guidance_scale
158
+
159
+ grads = -torch.autograd.grad(loss, latents)[0]
160
+
161
+ if isinstance(self.scheduler, LMSDiscreteScheduler):
162
+ latents = latents.detach() + grads * (sigma**2)
163
+ noise_pred = noise_pred_original
164
+ else:
165
+ noise_pred = noise_pred_original - torch.sqrt(beta_prod_t) * grads
166
+ return noise_pred, latents
167
+
168
+ @torch.no_grad()
169
+ def __call__(
170
+ self,
171
+ prompt: Union[str, List[str]],
172
+ height: Optional[int] = 512,
173
+ width: Optional[int] = 512,
174
+ num_inference_steps: Optional[int] = 50,
175
+ guidance_scale: Optional[float] = 7.5,
176
+ num_images_per_prompt: Optional[int] = 1,
177
+ eta: float = 0.0,
178
+ clip_guidance_scale: Optional[float] = 100,
179
+ clip_prompt: Optional[Union[str, List[str]]] = None,
180
+ num_cutouts: Optional[int] = 4,
181
+ use_cutouts: Optional[bool] = True,
182
+ generator: Optional[torch.Generator] = None,
183
+ latents: Optional[torch.Tensor] = None,
184
+ output_type: Optional[str] = "pil",
185
+ return_dict: bool = True,
186
+ ):
187
+ if isinstance(prompt, str):
188
+ batch_size = 1
189
+ elif isinstance(prompt, list):
190
+ batch_size = len(prompt)
191
+ else:
192
+ raise ValueError(f"`prompt` has to be of type `str` or `list` but is {type(prompt)}")
193
+
194
+ if height % 8 != 0 or width % 8 != 0:
195
+ raise ValueError(f"`height` and `width` have to be divisible by 8 but are {height} and {width}.")
196
+
197
+ # get prompt text embeddings
198
+ text_input = self.tokenizer(
199
+ prompt,
200
+ padding="max_length",
201
+ max_length=self.tokenizer.model_max_length,
202
+ truncation=True,
203
+ return_tensors="pt",
204
+ )
205
+ text_embeddings = self.text_encoder(text_input.input_ids.to(self.device))[0]
206
+ # duplicate text embeddings for each generation per prompt
207
+ text_embeddings = text_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
208
+
209
+ if clip_guidance_scale > 0:
210
+ if clip_prompt is not None:
211
+ clip_text_input = self.tokenizer(
212
+ clip_prompt,
213
+ padding="max_length",
214
+ max_length=self.tokenizer.model_max_length,
215
+ truncation=True,
216
+ return_tensors="pt",
217
+ ).input_ids.to(self.device)
218
+ else:
219
+ clip_text_input = text_input.input_ids.to(self.device)
220
+ text_embeddings_clip = self.clip_model.get_text_features(clip_text_input)
221
+ text_embeddings_clip = text_embeddings_clip / text_embeddings_clip.norm(p=2, dim=-1, keepdim=True)
222
+ # duplicate text embeddings clip for each generation per prompt
223
+ text_embeddings_clip = text_embeddings_clip.repeat_interleave(num_images_per_prompt, dim=0)
224
+
225
+ # here `guidance_scale` is defined analog to the guidance weight `w` of equation (2)
226
+ # of the Imagen paper: https://huggingface.co/papers/2205.11487 . `guidance_scale = 1`
227
+ # corresponds to doing no classifier free guidance.
228
+ do_classifier_free_guidance = guidance_scale > 1.0
229
+ # get unconditional embeddings for classifier free guidance
230
+ if do_classifier_free_guidance:
231
+ max_length = text_input.input_ids.shape[-1]
232
+ uncond_input = self.tokenizer([""], padding="max_length", max_length=max_length, return_tensors="pt")
233
+ uncond_embeddings = self.text_encoder(uncond_input.input_ids.to(self.device))[0]
234
+ # duplicate unconditional embeddings for each generation per prompt
235
+ uncond_embeddings = uncond_embeddings.repeat_interleave(num_images_per_prompt, dim=0)
236
+
237
+ # For classifier free guidance, we need to do two forward passes.
238
+ # Here we concatenate the unconditional and text embeddings into a single batch
239
+ # to avoid doing two forward passes
240
+ text_embeddings = torch.cat([uncond_embeddings, text_embeddings])
241
+
242
+ # get the initial random noise unless the user supplied it
243
+
244
+ # Unlike in other pipelines, latents need to be generated in the target device
245
+ # for 1-to-1 results reproducibility with the CompVis implementation.
246
+ # However this currently doesn't work in `mps`.
247
+ latents_shape = (batch_size * num_images_per_prompt, self.unet.config.in_channels, height // 8, width // 8)
248
+ latents_dtype = text_embeddings.dtype
249
+ if latents is None:
250
+ if self.device.type == "mps":
251
+ # randn does not work reproducibly on mps
252
+ latents = torch.randn(latents_shape, generator=generator, device="cpu", dtype=latents_dtype).to(
253
+ self.device
254
+ )
255
+ else:
256
+ latents = torch.randn(latents_shape, generator=generator, device=self.device, dtype=latents_dtype)
257
+ else:
258
+ if latents.shape != latents_shape:
259
+ raise ValueError(f"Unexpected latents shape, got {latents.shape}, expected {latents_shape}")
260
+ latents = latents.to(self.device)
261
+
262
+ # set timesteps
263
+ accepts_offset = "offset" in set(inspect.signature(self.scheduler.set_timesteps).parameters.keys())
264
+ extra_set_kwargs = {}
265
+ if accepts_offset:
266
+ extra_set_kwargs["offset"] = 1
267
+
268
+ self.scheduler.set_timesteps(num_inference_steps, **extra_set_kwargs)
269
+
270
+ # Some schedulers like PNDM have timesteps as arrays
271
+ # It's more optimized to move all timesteps to correct device beforehand
272
+ timesteps_tensor = self.scheduler.timesteps.to(self.device)
273
+
274
+ # scale the initial noise by the standard deviation required by the scheduler
275
+ latents = latents * self.scheduler.init_noise_sigma
276
+
277
+ # prepare extra kwargs for the scheduler step, since not all schedulers have the same signature
278
+ # eta (η) is only used with the DDIMScheduler, it will be ignored for other schedulers.
279
+ # eta corresponds to η in DDIM paper: https://huggingface.co/papers/2010.02502
280
+ # and should be between [0, 1]
281
+ accepts_eta = "eta" in set(inspect.signature(self.scheduler.step).parameters.keys())
282
+ extra_step_kwargs = {}
283
+ if accepts_eta:
284
+ extra_step_kwargs["eta"] = eta
285
+
286
+ # check if the scheduler accepts generator
287
+ accepts_generator = "generator" in set(inspect.signature(self.scheduler.step).parameters.keys())
288
+ if accepts_generator:
289
+ extra_step_kwargs["generator"] = generator
290
+
291
+ for i, t in enumerate(self.progress_bar(timesteps_tensor)):
292
+ # expand the latents if we are doing classifier free guidance
293
+ latent_model_input = torch.cat([latents] * 2) if do_classifier_free_guidance else latents
294
+ latent_model_input = self.scheduler.scale_model_input(latent_model_input, t)
295
+
296
+ # predict the noise residual
297
+ noise_pred = self.unet(latent_model_input, t, encoder_hidden_states=text_embeddings).sample
298
+
299
+ # perform classifier free guidance
300
+ if do_classifier_free_guidance:
301
+ noise_pred_uncond, noise_pred_text = noise_pred.chunk(2)
302
+ noise_pred = noise_pred_uncond + guidance_scale * (noise_pred_text - noise_pred_uncond)
303
+
304
+ # perform clip guidance
305
+ if clip_guidance_scale > 0:
306
+ text_embeddings_for_guidance = (
307
+ text_embeddings.chunk(2)[1] if do_classifier_free_guidance else text_embeddings
308
+ )
309
+ noise_pred, latents = self.cond_fn(
310
+ latents,
311
+ t,
312
+ i,
313
+ text_embeddings_for_guidance,
314
+ noise_pred,
315
+ text_embeddings_clip,
316
+ clip_guidance_scale,
317
+ num_cutouts,
318
+ use_cutouts,
319
+ )
320
+
321
+ # compute the previous noisy sample x_t -> x_t-1
322
+ latents = self.scheduler.step(noise_pred, t, latents, **extra_step_kwargs).prev_sample
323
+
324
+ # scale and decode the image latents with vae
325
+ latents = 1 / self.vae.config.scaling_factor * latents
326
+ image = self.vae.decode(latents).sample
327
+
328
+ image = (image / 2 + 0.5).clamp(0, 1)
329
+ image = image.cpu().permute(0, 2, 3, 1).numpy()
330
+
331
+ if output_type == "pil":
332
+ image = self.numpy_to_pil(image)
333
+
334
+ if not return_dict:
335
+ return (image, None)
336
+
337
+ return StableDiffusionPipelineOutput(images=image, nsfw_content_detected=None)
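This pipeline is loaded the same way as the images-mixing variant above, only with custom_pipeline="clip_guided_stable_diffusion" plus the clip_model and feature_extractor components. The hedged call sketch below (illustrative values; guided_pipeline is the hypothetical variable holding the loaded pipeline) focuses on the CLIP-guidance knobs exposed by __call__:

import torch

out = guided_pipeline(
    prompt="a watercolor painting of a fox in a forest",
    clip_prompt="vivid watercolor, detailed fur",   # optional separate text used only for the CLIP loss
    clip_guidance_scale=100,                        # 0 disables CLIP guidance entirely
    num_cutouts=4,
    use_cutouts=False,                              # single resized image instead of random crops
    num_inference_steps=50,
    guidance_scale=7.5,
    generator=torch.Generator("cuda").manual_seed(0),
)
image = out.images[0]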