SuperCS committed
Commit 6c04222 · verified · 1 Parent(s): 4c25524

Add files using upload-large-folder tool

This view is limited to 50 files because it contains too many changes.
Files changed (50)
  1. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Kontext-dev.sh +14 -0
  2. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Krea-dev.sh +12 -0
  3. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-AttriCtrl.sh +14 -0
  4. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Inpainting-Beta.sh +14 -0
  5. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Union-alpha.sh +14 -0
  6. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Upscaler.sh +14 -0
  7. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-IP-Adapter.sh +14 -0
  8. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-InfiniteYou.sh +14 -0
  9. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-LoRA-Encoder.sh +14 -0
  10. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev.sh +12 -0
  11. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Nexus-Gen.sh +14 -0
  12. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Step1X-Edit.sh +14 -0
  13. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config.yaml +22 -0
  14. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config_zero2offload.yaml +22 -0
  15. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLEX.2-preview.sh +15 -0
  16. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Kontext-dev.sh +17 -0
  17. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Krea-dev.sh +15 -0
  18. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-AttriCtrl.sh +17 -0
  19. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Inpainting-Beta.sh +17 -0
  20. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Union-alpha.sh +17 -0
  21. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Upscaler.sh +17 -0
  22. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-EliGen.sh +17 -0
  23. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-IP-Adapter.sh +17 -0
  24. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-InfiniteYou.sh +17 -0
  25. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev.sh +15 -0
  26. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Nexus-Gen.sh +17 -0
  27. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Step1X-Edit.sh +17 -0
  28. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLEX.2-preview.py +20 -0
  29. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Kontext-dev.py +26 -0
  30. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Krea-dev.py +20 -0
  31. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-AttriCtrl.py +21 -0
  32. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Inpainting-Beta.py +31 -0
  33. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Union-alpha.py +31 -0
  34. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Upscaler.py +30 -0
  35. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-IP-Adapter.py +28 -0
  36. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-InfiniteYou.py +33 -0
  37. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-LoRA-Encoder.py +25 -0
  38. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev.py +20 -0
  39. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Nexus-Gen.py +28 -0
  40. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Step1X-Edit.py +25 -0
  41. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLEX.2-preview.py +18 -0
  42. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Kontext-dev.py +24 -0
  43. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Krea-dev.py +18 -0
  44. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-AttriCtrl.py +19 -0
  45. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Inpainting-Beta.py +29 -0
  46. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Union-alpha.py +29 -0
  47. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Upscaler.py +28 -0
  48. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-EliGen.py +33 -0
  49. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-IP-Adapter.py +26 -0
  50. exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-InfiniteYou.py +28 -0
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Kontext-dev.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_kontext.csv \
+ --data_file_keys "image,kontext_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Kontext-dev:flux1-kontext-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Kontext-dev_full" \
+ --trainable_models "dit" \
+ --extra_inputs "kontext_images" \
+ --use_gradient_checkpointing
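Note: the training scripts in this diff read per-sample prompts and file paths from the file given by --dataset_metadata_path, and the columns named in --data_file_keys are loaded as images. As a minimal, unverified sketch of what metadata_kontext.csv could look like — the column names are inferred from the flags and the "prompt" column is an assumption, so consult DiffSynth-Studio's dataset documentation for the authoritative schema:

# Hypothetical layout, inferred from --data_file_keys "image,kontext_images"; not confirmed by this diff.
cat > data/example_image_dataset/metadata_kontext.csv <<'EOF'
image,kontext_images,prompt
image_1.jpg,reference_1.jpg,"Make the dog turn its head around."
EOF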
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-Krea-dev.sh ADDED
@@ -0,0 +1,12 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Krea-dev:flux1-krea-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Krea-dev_full" \
+ --trainable_models "dit" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-AttriCtrl.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_attrictrl.csv \
+ --data_file_keys "image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,DiffSynth-Studio/AttriCtrl-FLUX.1-Dev:models/brightness.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.value_controller.encoders.0." \
+ --output_path "./models/train/FLUX.1-dev-AttriCtrl_full" \
+ --trainable_models "value_controller" \
+ --extra_inputs "value_controller_inputs" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Inpainting-Beta.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_inpaint.csv \
+ --data_file_keys "image,controlnet_image,controlnet_inpaint_mask" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.controlnet.models.0." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_full" \
+ --trainable_models "controlnet" \
+ --extra_inputs "controlnet_image,controlnet_inpaint_mask" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Union-alpha.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_canny.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-Controlnet-Union-alpha:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.controlnet.models.0." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Union-alpha_full" \
+ --trainable_models "controlnet" \
+ --extra_inputs "controlnet_image,controlnet_processor_id" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-Controlnet-Upscaler.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_upscale.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,jasperai/Flux.1-dev-Controlnet-Upscaler:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.controlnet.models.0." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Upscaler_full" \
+ --trainable_models "controlnet" \
+ --extra_inputs "controlnet_image" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-IP-Adapter.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_ipadapter.csv \
+ --data_file_keys "image,ipadapter_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-IP-Adapter:ip-adapter.bin,google/siglip-so400m-patch14-384:" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.ipadapter." \
+ --output_path "./models/train/FLUX.1-dev-IP-Adapter_full" \
+ --trainable_models "ipadapter" \
+ --extra_inputs "ipadapter_images" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-InfiniteYou.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_infiniteyou.csv \
+ --data_file_keys "image,controlnet_image,infinityou_id_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/image_proj_model.bin,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe." \
+ --output_path "./models/train/FLUX.1-dev-InfiniteYou_full" \
+ --trainable_models "controlnet,image_proj_model" \
+ --extra_inputs "controlnet_image,infinityou_id_image,infinityou_guidance" \
+ --use_gradient_checkpointing
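Note: unlike the other full-training scripts, this one trains two modules (controlnet and image_proj_model) and strips only the "pipe." prefix from the checkpoint, so the saved state dict keeps the per-module prefixes; the matching validate_full/FLUX.1-dev-InfiniteYou.py script below splits the state dict by those prefixes before loading each module.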
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev-LoRA-Encoder.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_lora_encoder.csv \
+ --data_file_keys "image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,DiffSynth-Studio/LoRA-Encoder-FLUX.1-Dev:model.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.lora_encoder." \
+ --output_path "./models/train/FLUX.1-dev-LoRA-Encoder_full" \
+ --trainable_models "lora_encoder" \
+ --extra_inputs "lora_encoder_inputs" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/FLUX.1-dev.sh ADDED
@@ -0,0 +1,12 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev_full" \
+ --trainable_models "dit" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Nexus-Gen.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config_zero2offload.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_nexusgen_edit.csv \
+ --data_file_keys "image,nexus_gen_reference_image" \
+ --max_pixels 262144 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "DiffSynth-Studio/Nexus-GenV2:model*.safetensors,DiffSynth-Studio/Nexus-GenV2:edit_decoder.bin,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-NexusGen-Edit_full" \
+ --trainable_models "dit" \
+ --extra_inputs "nexus_gen_reference_image" \
+ --use_gradient_checkpointing_offload
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/Step1X-Edit.sh ADDED
@@ -0,0 +1,14 @@
+ accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_step1x.csv \
+ --data_file_keys "image,step1x_reference_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "Qwen/Qwen2.5-VL-7B-Instruct:,stepfun-ai/Step1X-Edit:step1x-edit-i1258.safetensors,stepfun-ai/Step1X-Edit:vae.safetensors" \
+ --learning_rate 1e-5 \
+ --num_epochs 1 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Step1X-Edit_full" \
+ --trainable_models "dit" \
+ --extra_inputs "step1x_reference_image" \
+ --use_gradient_checkpointing_offload
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config.yaml ADDED
@@ -0,0 +1,22 @@
+ compute_environment: LOCAL_MACHINE
+ debug: false
+ deepspeed_config:
+   gradient_accumulation_steps: 1
+   offload_optimizer_device: none
+   offload_param_device: none
+   zero3_init_flag: false
+   zero_stage: 2
+ distributed_type: DEEPSPEED
+ downcast_bf16: 'no'
+ enable_cpu_affinity: false
+ machine_rank: 0
+ main_training_function: main
+ mixed_precision: bf16
+ num_machines: 1
+ num_processes: 8
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
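Note: this config assumes a single machine with 8 GPUs (num_processes: 8) running DeepSpeed ZeRO stage 2 without offloading. On a machine with fewer GPUs, the value can be edited in the YAML or, assuming a recent accelerate version, overridden on the command line, for example:

# Sketch: flags passed to `accelerate launch` override the config-file values.
accelerate launch --config_file examples/flux/model_training/full/accelerate_config.yaml \
  --num_processes 4 \
  examples/flux/model_training/train.py ...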
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/full/accelerate_config_zero2offload.yaml ADDED
@@ -0,0 +1,22 @@
+ compute_environment: LOCAL_MACHINE
+ debug: false
+ deepspeed_config:
+   gradient_accumulation_steps: 1
+   offload_optimizer_device: 'cpu'
+   offload_param_device: 'cpu'
+   zero3_init_flag: false
+   zero_stage: 2
+ distributed_type: DEEPSPEED
+ downcast_bf16: 'no'
+ enable_cpu_affinity: false
+ machine_rank: 0
+ main_training_function: main
+ mixed_precision: bf16
+ num_machines: 1
+ num_processes: 8
+ rdzv_backend: static
+ same_network: true
+ tpu_env: []
+ tpu_use_cluster: false
+ tpu_use_sudo: false
+ use_cpu: false
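Note: this variant is identical to accelerate_config.yaml except that offload_optimizer_device and offload_param_device are set to 'cpu', trading step time for GPU memory. The Nexus-Gen full-training script above pairs it with --use_gradient_checkpointing_offload for the same reason.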
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLEX.2-preview.sh ADDED
@@ -0,0 +1,15 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "ostris/Flex.2-preview:Flex.2-preview.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLEX.2-preview_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
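Note: all of the LoRA scripts in this folder share one recipe: rank-32 adapters on the attention and feed-forward projections listed in --lora_target_modules, a 1e-4 learning rate for 5 epochs, and --align_to_opensource_format, presumably so the exported weights match the common open-source FLUX LoRA key layout. Only the dataset, extra inputs, and base checkpoints vary between scripts.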
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Kontext-dev.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_kontext.csv \
+ --data_file_keys "image,kontext_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Kontext-dev:flux1-kontext-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Kontext-dev_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --extra_inputs "kontext_images" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-Krea-dev.sh ADDED
@@ -0,0 +1,15 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-Krea-dev:flux1-krea-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-Krea-dev_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-AttriCtrl.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_attrictrl.csv \
+ --data_file_keys "image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,DiffSynth-Studio/AttriCtrl-FLUX.1-Dev:models/brightness.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-AttriCtrl_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "value_controller_inputs" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Inpainting-Beta.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_inpaint.csv \
+ --data_file_keys "image,controlnet_image,controlnet_inpaint_mask" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image,controlnet_inpaint_mask" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Union-alpha.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_canny.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-Controlnet-Union-alpha:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Union-alpha_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image,controlnet_processor_id" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-Controlnet-Upscaler.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_controlnet_upscale.csv \
+ --data_file_keys "image,controlnet_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,jasperai/Flux.1-dev-Controlnet-Upscaler:diffusion_pytorch_model.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-Controlnet-Upscaler_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-EliGen.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_eligen.json \
+ --data_file_keys "image,eligen_entity_masks" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-EliGen_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --extra_inputs "eligen_entity_masks,eligen_entity_prompts" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-IP-Adapter.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_ipadapter.csv \
+ --data_file_keys "image,ipadapter_images" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,InstantX/FLUX.1-dev-IP-Adapter:ip-adapter.bin,google/siglip-so400m-patch14-384:" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-IP-Adapter_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "ipadapter_images" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev-InfiniteYou.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_infiniteyou.csv \
+ --data_file_keys "image,controlnet_image,infinityou_id_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 100 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/image_proj_model.bin,ByteDance/InfiniteYou:infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev-InfiniteYou_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "controlnet_image,infinityou_id_image,infinityou_guidance" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/FLUX.1-dev.sh ADDED
@@ -0,0 +1,15 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata.csv \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "black-forest-labs/FLUX.1-dev:flux1-dev.safetensors,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-dev_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Nexus-Gen.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_nexusgen_edit.csv \
+ --data_file_keys "image,nexus_gen_reference_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 400 \
+ --model_id_with_origin_paths "DiffSynth-Studio/Nexus-GenV2:model*.safetensors,DiffSynth-Studio/Nexus-GenV2:edit_decoder.bin,black-forest-labs/FLUX.1-dev:text_encoder/model.safetensors,black-forest-labs/FLUX.1-dev:text_encoder_2/,black-forest-labs/FLUX.1-dev:ae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/FLUX.1-NexusGen-Edit_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --align_to_opensource_format \
+ --extra_inputs "nexus_gen_reference_image" \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/lora/Step1X-Edit.sh ADDED
@@ -0,0 +1,17 @@
+ accelerate launch examples/flux/model_training/train.py \
+ --dataset_base_path data/example_image_dataset \
+ --dataset_metadata_path data/example_image_dataset/metadata_step1x.csv \
+ --data_file_keys "image,step1x_reference_image" \
+ --max_pixels 1048576 \
+ --dataset_repeat 50 \
+ --model_id_with_origin_paths "Qwen/Qwen2.5-VL-7B-Instruct:,stepfun-ai/Step1X-Edit:step1x-edit-i1258.safetensors,stepfun-ai/Step1X-Edit:vae.safetensors" \
+ --learning_rate 1e-4 \
+ --num_epochs 5 \
+ --remove_prefix_in_ckpt "pipe.dit." \
+ --output_path "./models/train/Step1X-Edit_lora" \
+ --lora_base_model "dit" \
+ --lora_target_modules "a_to_qkv,b_to_qkv,ff_a.0,ff_a.2,ff_b.0,ff_b.2,a_to_out,b_to_out,proj_out,norm.linear,norm1_a.linear,norm1_b.linear,to_qkv_mlp" \
+ --lora_rank 32 \
+ --extra_inputs "step1x_reference_image" \
+ --align_to_opensource_format \
+ --use_gradient_checkpointing
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLEX.2-preview.py ADDED
@@ -0,0 +1,20 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="ostris/Flex.2-preview", origin_file_pattern="Flex.2-preview.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLEX.2-preview_full/epoch-0.safetensors")
+ pipe.dit.load_state_dict(state_dict)
+
+ image = pipe(prompt="dog,white and brown dog, sitting on wall, under pink flowers", seed=0)
+ image.save("image_FLEX.2-preview_full.jpg")
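Note: the checkpoints loaded by these validation scripts are named epoch-N.safetensors and indexed from zero: the full-training scripts run with --num_epochs 1 and their validation scripts load epoch-0.safetensors, while the LoRA scripts run 5 epochs and their validation scripts load the final epoch-4.safetensors (epoch-3 in the AttriCtrl LoRA case, as written in this commit).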
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Kontext-dev.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-Kontext-dev", origin_file_pattern="flux1-kontext-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-Kontext-dev_full/epoch-0.safetensors")
+ pipe.dit.load_state_dict(state_dict)
+
+ image = pipe(
+     prompt="Make the dog turn its head around.",
+     kontext_images=Image.open("data/example_image_dataset/2.jpg").resize((768, 768)),
+     height=768, width=768,
+     seed=0
+ )
+ image.save("image_FLUX.1-Kontext-dev_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-Krea-dev.py ADDED
@@ -0,0 +1,20 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-Krea-dev", origin_file_pattern="flux1-krea-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-Krea-dev_full/epoch-0.safetensors")
+ pipe.dit.load_state_dict(state_dict)
+
+ image = pipe(prompt="a dog", seed=0)
+ image.save("image_FLUX.1-Krea-dev_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-AttriCtrl.py ADDED
@@ -0,0 +1,21 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="DiffSynth-Studio/AttriCtrl-FLUX.1-Dev", origin_file_pattern="models/brightness.safetensors")
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-dev-AttriCtrl_full/epoch-0.safetensors")
+ pipe.value_controller.encoders[0].load_state_dict(state_dict)
+
+ image = pipe(prompt="a cat", seed=0, value_controller_inputs=0.1, rand_device="cuda")
+ image.save("image_FLUX.1-dev-AttriCtrl_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Inpainting-Beta.py ADDED
@@ -0,0 +1,31 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from diffsynth import load_state_dict
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_full/epoch-0.safetensors")
+ pipe.controlnet.models[0].load_state_dict(state_dict)
+
+ image = pipe(
+     prompt="a cat sitting on a chair, wearing sunglasses",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/inpaint/image_1.jpg"),
+         inpaint_mask=Image.open("data/example_image_dataset/inpaint/mask.jpg"),
+         scale=0.9
+     )],
+     height=1024, width=1024,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-Controlnet-Inpainting-Beta_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Union-alpha.py ADDED
@@ -0,0 +1,31 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from diffsynth import load_state_dict
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="InstantX/FLUX.1-dev-Controlnet-Union-alpha", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-dev-Controlnet-Union-alpha_full/epoch-0.safetensors")
+ pipe.controlnet.models[0].load_state_dict(state_dict)
+
+ image = pipe(
+     prompt="a dog",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/canny/image_1.jpg"),
+         scale=0.9,
+         processor_id="canny",
+     )],
+     height=768, width=768,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-Controlnet-Union-alpha_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-Controlnet-Upscaler.py ADDED
@@ -0,0 +1,30 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from diffsynth import load_state_dict
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="jasperai/Flux.1-dev-Controlnet-Upscaler", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-dev-Controlnet-Upscaler_full/epoch-0.safetensors")
+ pipe.controlnet.models[0].load_state_dict(state_dict)
+
+ image = pipe(
+     prompt="a dog",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/upscale/image_1.jpg"),
+         scale=0.9
+     )],
+     height=768, width=768,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-Controlnet-Upscaler_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-IP-Adapter.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="InstantX/FLUX.1-dev-IP-Adapter", origin_file_pattern="ip-adapter.bin"),
+         ModelConfig(model_id="google/siglip-so400m-patch14-384"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-dev-IP-Adapter_full/epoch-0.safetensors")
+ pipe.ipadapter.load_state_dict(state_dict)
+
+ image = pipe(
+     prompt="a dog",
+     ipadapter_images=Image.open("data/example_image_dataset/1.jpg"),
+     height=768, width=768,
+     seed=0
+ )
+ image.save("image_FLUX.1-dev-IP-Adapter_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-InfiniteYou.py ADDED
@@ -0,0 +1,33 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from diffsynth import load_state_dict
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/image_proj_model.bin"),
+         ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-dev-InfiniteYou_full/epoch-0.safetensors")
+ state_dict_projector = {i.replace("image_proj_model.", ""): state_dict[i] for i in state_dict if i.startswith("image_proj_model.")}
+ pipe.image_proj_model.load_state_dict(state_dict_projector)
+ state_dict_controlnet = {i.replace("controlnet.models.0.", ""): state_dict[i] for i in state_dict if i.startswith("controlnet.models.0.")}
+ pipe.controlnet.models[0].load_state_dict(state_dict_controlnet)
+
+ image = pipe(
+     prompt="a man with a red hat",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/infiniteyou/image_1.jpg"),
+     )],
+     height=1024, width=1024,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-InfiniteYou_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev-LoRA-Encoder.py ADDED
@@ -0,0 +1,25 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="DiffSynth-Studio/LoRA-Encoder-FLUX.1-Dev", origin_file_pattern="model.safetensors"),
+     ],
+ )
+ pipe.enable_lora_magic()
+ state_dict = load_state_dict("models/train/FLUX.1-dev-LoRA-Encoder_full/epoch-0.safetensors")
+ pipe.lora_encoder.load_state_dict(state_dict)
+
+ lora = ModelConfig(model_id="VoidOc/flux_animal_forest1", origin_file_pattern="20.safetensors")
+ pipe.load_lora(pipe.dit, lora, hotload=True)  # Use `pipe.clear_lora()` to drop the loaded LoRA.
+
+ image = pipe(prompt="", seed=0, lora_encoder_inputs=lora)
+ image.save("image_FLUX.1-dev-LoRA-Encoder_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/FLUX.1-dev.py ADDED
@@ -0,0 +1,20 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-dev_full/epoch-0.safetensors")
+ pipe.dit.load_state_dict(state_dict)
+
+ image = pipe(prompt="a dog", seed=0)
+ image.save("image_FLUX.1-dev_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Nexus-Gen.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ from PIL import Image
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="model*.safetensors"),
+         ModelConfig(model_id="DiffSynth-Studio/Nexus-GenV2", origin_file_pattern="edit_decoder.bin"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/FLUX.1-NexusGen-Edit_full/epoch-0.safetensors")
+ pipe.dit.load_state_dict(state_dict)
+
+ ref_image = Image.open("data/example_image_dataset/nexus_gen/image_1.png").convert("RGB")
+ prompt = "Add a pair of sunglasses."
+ image = pipe(
+     prompt=prompt, negative_prompt="",
+     seed=42, cfg_scale=2.0, num_inference_steps=50,
+     nexus_gen_reference_image=ref_image,
+     height=512, width=512,
+ )
+ image.save("NexusGen-Edit_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_full/Step1X-Edit.py ADDED
@@ -0,0 +1,25 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from diffsynth import load_state_dict
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="Qwen/Qwen2.5-VL-7B-Instruct"),
+         ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="step1x-edit-i1258.safetensors"),
+         ModelConfig(model_id="stepfun-ai/Step1X-Edit", origin_file_pattern="vae.safetensors"),
+     ],
+ )
+ state_dict = load_state_dict("models/train/Step1X-Edit_full/epoch-0.safetensors")
+ pipe.dit.load_state_dict(state_dict)
+
+ image = pipe(
+     prompt="Make the dog turn its head around.",
+     step1x_reference_image=Image.open("data/example_image_dataset/2.jpg").resize((768, 768)),
+     height=768, width=768, cfg_scale=6,
+     seed=0
+ )
+ image.save("image_Step1X-Edit_full.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLEX.2-preview.py ADDED
@@ -0,0 +1,18 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="ostris/Flex.2-preview", origin_file_pattern="Flex.2-preview.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLEX.2-preview_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(prompt="dog,white and brown dog, sitting on wall, under pink flowers", seed=0)
+ image.save("image_FLEX.2-preview_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Kontext-dev.py ADDED
@@ -0,0 +1,24 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-Kontext-dev", origin_file_pattern="flux1-kontext-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-Kontext-dev_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(
+     prompt="Make the dog turn its head around.",
+     kontext_images=Image.open("data/example_image_dataset/2.jpg").resize((768, 768)),
+     height=768, width=768,
+     seed=0
+ )
+ image.save("image_FLUX.1-Kontext-dev_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-Krea-dev.py ADDED
@@ -0,0 +1,18 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-Krea-dev", origin_file_pattern="flux1-krea-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-Krea-dev_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(prompt="a dog", seed=0)
+ image.save("image_FLUX.1-Krea-dev_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-AttriCtrl.py ADDED
@@ -0,0 +1,19 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="DiffSynth-Studio/AttriCtrl-FLUX.1-Dev", origin_file_pattern="models/brightness.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-AttriCtrl_lora/epoch-3.safetensors", alpha=1)
+
+ image = pipe(prompt="a cat", seed=0, value_controller_inputs=0.1, rand_device="cuda")
+ image.save("image_FLUX.1-dev-AttriCtrl_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Inpainting-Beta.py ADDED
@@ -0,0 +1,29 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="alimama-creative/FLUX.1-dev-Controlnet-Inpainting-Beta", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-Controlnet-Inpainting-Beta_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(
+     prompt="a cat sitting on a chair, wearing sunglasses",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/inpaint/image_1.jpg"),
+         inpaint_mask=Image.open("data/example_image_dataset/inpaint/mask.jpg"),
+         scale=0.9
+     )],
+     height=1024, width=1024,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-Controlnet-Inpainting-Beta_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Union-alpha.py ADDED
@@ -0,0 +1,29 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="InstantX/FLUX.1-dev-Controlnet-Union-alpha", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-Controlnet-Union-alpha_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(
+     prompt="a dog",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/canny/image_1.jpg"),
+         scale=0.9,
+         processor_id="canny",
+     )],
+     height=768, width=768,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-Controlnet-Union-alpha_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-Controlnet-Upscaler.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="jasperai/Flux.1-dev-Controlnet-Upscaler", origin_file_pattern="diffusion_pytorch_model.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-Controlnet-Upscaler_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(
+     prompt="a dog",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/upscale/image_1.jpg"),
+         scale=0.9
+     )],
+     height=768, width=768,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-Controlnet-Upscaler_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-EliGen.py ADDED
@@ -0,0 +1,33 @@
+ import torch
+ from PIL import Image
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+     ],
+ )
+
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-EliGen_lora/epoch-4.safetensors", alpha=1)
+
+ entity_prompts = ["A beautiful girl", "sign 'Entity Control'", "shorts", "shirt"]
+ global_prompt = "A beautiful girl wearing shirt and shorts in the street, holding a sign 'Entity Control'"
+ masks = [Image.open(f"data/example_image_dataset/eligen/{i}.png").convert('RGB') for i in range(len(entity_prompts))]
+ # generate an image with per-entity prompts and masks
+ image = pipe(
+     prompt=global_prompt,
+     cfg_scale=1.0,
+     num_inference_steps=50,
+     embedded_guidance=3.5,
+     seed=42,
+     height=1024,
+     width=1024,
+     eligen_entity_prompts=entity_prompts,
+     eligen_entity_masks=masks,
+ )
+ image.save("EliGen_lora.png")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-IP-Adapter.py ADDED
@@ -0,0 +1,26 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="InstantX/FLUX.1-dev-IP-Adapter", origin_file_pattern="ip-adapter.bin"),
+         ModelConfig(model_id="google/siglip-so400m-patch14-384"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-IP-Adapter_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(
+     prompt="dog,white and brown dog, sitting on wall, under pink flowers",
+     ipadapter_images=Image.open("data/example_image_dataset/1.jpg"),
+     height=768, width=768,
+     seed=0
+ )
+ image.save("image_FLUX.1-dev-IP-Adapter_lora.jpg")
exp_code/1_benchmark/DiffSynth-Studio/examples/flux/model_training/validate_lora/FLUX.1-dev-InfiniteYou.py ADDED
@@ -0,0 +1,28 @@
+ import torch
+ from diffsynth.pipelines.flux_image_new import FluxImagePipeline, ModelConfig, ControlNetInput
+ from PIL import Image
+
+
+ pipe = FluxImagePipeline.from_pretrained(
+     torch_dtype=torch.bfloat16,
+     device="cuda",
+     model_configs=[
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="flux1-dev.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder/model.safetensors"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="text_encoder_2/"),
+         ModelConfig(model_id="black-forest-labs/FLUX.1-dev", origin_file_pattern="ae.safetensors"),
+         ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/image_proj_model.bin"),
+         ModelConfig(model_id="ByteDance/InfiniteYou", origin_file_pattern="infu_flux_v1.0/aes_stage2/InfuseNetModel/*.safetensors"),
+     ],
+ )
+ pipe.load_lora(pipe.dit, "models/train/FLUX.1-dev-InfiniteYou_lora/epoch-4.safetensors", alpha=1)
+
+ image = pipe(
+     prompt="a man with a red hat",
+     controlnet_inputs=[ControlNetInput(
+         image=Image.open("data/example_image_dataset/infiniteyou/image_1.jpg"),
+     )],
+     height=1024, width=1024,
+     seed=0, rand_device="cuda",
+ )
+ image.save("image_FLUX.1-dev-InfiniteYou_lora.jpg")