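"""Distillation training loop for CausVid.

Trains a generator and a fake-score critic against a frozen real-score
network via DMD (distribution matching distillation), with every submodule
sharded by FSDP.
"""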
from causvid.data import ODERegressionLMDBDataset, TextDataset
from causvid.models import get_block_class
from causvid.util import (
    launch_distributed_job,
    prepare_for_saving,
    set_seed, init_logging_folder,
    fsdp_wrap, cycle,
    fsdp_state_dict,
    barrier
)
import torch.distributed as dist
from omegaconf import OmegaConf
from causvid.dmd import DMD
import argparse
import torch
import wandb
import time
import os


class Trainer:
    def __init__(self, config):
        self.config = config
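
        # Set up the distributed run: TF32 matmuls, the process group,
        # per-rank device and seed, and the training dtype.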
        torch.backends.cuda.matmul.allow_tf32 = True
        torch.backends.cudnn.allow_tf32 = True

        launch_distributed_job()
        global_rank = dist.get_rank()

        self.dtype = torch.bfloat16 if config.mixed_precision else torch.float32
        self.device = torch.cuda.current_device()
        self.is_main_process = global_rank == 0

        # seed == 0 requests a random seed: draw it on rank 0 and broadcast so
        # every rank agrees, then offset by rank to decorrelate data sampling.
        if config.seed == 0:
            random_seed = torch.randint(0, 10000000, (1,), device=self.device)
            dist.broadcast(random_seed, src=0)
            config.seed = random_seed.item()

        set_seed(config.seed + global_rank)

        if self.is_main_process:
            self.output_path, self.wandb_folder = "output", "output_wandb"
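
        # Build the distillation model (only DMD is supported) and shard each
        # submodule with FSDP. With the "transformer" wrap strategy, the
        # transformer block class is passed so FSDP wraps at block granularity.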
if config.distillation_loss == "dmd": |
|
|
self.distillation_model = DMD(config, device=self.device) |
|
|
else: |
|
|
raise ValueError("Invalid distillation loss type") |
|
|
|
|
|
self.distillation_model.generator = fsdp_wrap( |
|
|
self.distillation_model.generator, |
|
|
sharding_strategy=config.sharding_strategy, |
|
|
mixed_precision=config.mixed_precision, |
|
|
wrap_strategy=config.generator_fsdp_wrap_strategy, |
|
|
transformer_module=(get_block_class(config.generator_fsdp_transformer_module), |
|
|
) if config.generator_fsdp_wrap_strategy == "transformer" else None |
|
|
) |
|
|
|
|
|

        self.distillation_model.real_score = fsdp_wrap(
            self.distillation_model.real_score,
            sharding_strategy=config.sharding_strategy,
            mixed_precision=config.mixed_precision,
            wrap_strategy=config.real_score_fsdp_wrap_strategy,
            transformer_module=(
                get_block_class(config.real_score_fsdp_transformer_module),
            ) if config.real_score_fsdp_wrap_strategy == "transformer" else None
        )

        self.distillation_model.fake_score = fsdp_wrap(
            self.distillation_model.fake_score,
            sharding_strategy=config.sharding_strategy,
            mixed_precision=config.mixed_precision,
            wrap_strategy=config.fake_score_fsdp_wrap_strategy,
            transformer_module=(
                get_block_class(config.fake_score_fsdp_transformer_module),
            ) if config.fake_score_fsdp_wrap_strategy == "transformer" else None
        )

        self.distillation_model.text_encoder = fsdp_wrap(
            self.distillation_model.text_encoder,
            sharding_strategy=config.sharding_strategy,
            mixed_precision=config.mixed_precision,
            wrap_strategy=config.text_encoder_fsdp_wrap_strategy,
            transformer_module=(
                get_block_class(config.text_encoder_fsdp_transformer_module),
            ) if config.text_encoder_fsdp_wrap_strategy == "transformer" else None
        )

        # Keep the VAE on-device for decoding visualizations unless disabled.
        if not config.no_visualize:
            self.distillation_model.vae = self.distillation_model.vae.to(
                device=self.device, dtype=self.dtype)
        # One AdamW optimizer each for the generator and the critic (fake
        # score); only trainable parameters are included.
        self.generator_optimizer = torch.optim.AdamW(
            [param for param in self.distillation_model.generator.parameters()
             if param.requires_grad],
            lr=config.lr,
            betas=(config.beta1, config.beta2)
        )

        self.critic_optimizer = torch.optim.AdamW(
            [param for param in self.distillation_model.fake_score.parameters()
             if param.requires_grad],
            lr=config.lr,
            betas=(config.beta1, config.beta2)
        )
self.backward_simulation = getattr(config, "backward_simulation", True) |
|
|
|
|
|
if self.backward_simulation: |
|
|
dataset = TextDataset(config.data_path) |
|
|
else: |
|
|
dataset = ODERegressionLMDBDataset( |
|
|
config.data_path, max_pair=int(1e8)) |
|
|
sampler = torch.utils.data.distributed.DistributedSampler( |
|
|
dataset, shuffle=True, drop_last=True) |
|
|
dataloader = torch.utils.data.DataLoader( |
|
|
dataset, batch_size=config.batch_size, sampler=sampler) |
|
|
self.dataloader = cycle(dataloader) |
|
|
|
|
|
self.step = 0 |
|
|
self.max_grad_norm = 10.0 |
|
|
self.previous_time = None |
|
|
|
|
|

    def save(self):
        print("Start gathering distributed model states...")
        generator_state_dict = fsdp_state_dict(
            self.distillation_model.generator)
        critic_state_dict = fsdp_state_dict(
            self.distillation_model.fake_score)
        state_dict = {
            "generator": generator_state_dict,
            "critic": critic_state_dict
        }

        if self.is_main_process:
            checkpoint_dir = os.path.join(
                self.output_path, f"checkpoint_model_{self.step:06d}")
            os.makedirs(checkpoint_dir, exist_ok=True)
            torch.save(state_dict, os.path.join(checkpoint_dir, "model.pt"))
            print("Model saved to", os.path.join(checkpoint_dir, "model.pt"))

    def train_one_step(self):
        self.distillation_model.eval()  # disable dropout etc.; grads still flow

        TRAIN_GENERATOR = self.step % self.config.dfake_gen_update_ratio == 0
        VISUALIZE = self.step % self.config.log_iters == 0 and not self.config.no_visualize

        if self.step % 20 == 0:
            torch.cuda.empty_cache()

        # Fetch a batch: prompts plus the final ODE latent when regressing
        # against precomputed trajectories, prompts alone otherwise.
        if not self.backward_simulation:
            batch = next(self.dataloader)
            text_prompts = batch["prompts"]
            clean_latent = batch["ode_latent"][:, -1].to(
                device=self.device, dtype=self.dtype)
        else:
            text_prompts = next(self.dataloader)
            clean_latent = None

        batch_size = len(text_prompts)
        image_or_video_shape = list(self.config.image_or_video_shape)
        image_or_video_shape[0] = batch_size
        # Encode prompts without gradients; the unconditional (negative-prompt)
        # embedding never changes, so compute it once and cache it.
        with torch.no_grad():
            conditional_dict = self.distillation_model.text_encoder(
                text_prompts=text_prompts)

            if not getattr(self, "unconditional_dict", None):
                unconditional_dict = self.distillation_model.text_encoder(
                    text_prompts=[self.config.negative_prompt] * batch_size)
                unconditional_dict = {k: v.detach()
                                      for k, v in unconditional_dict.items()}
                self.unconditional_dict = unconditional_dict
            else:
                unconditional_dict = self.unconditional_dict
        # Generator update, taken every dfake_gen_update_ratio-th step.
        if TRAIN_GENERATOR:
            generator_loss, generator_log_dict = self.distillation_model.generator_loss(
                image_or_video_shape=image_or_video_shape,
                conditional_dict=conditional_dict,
                unconditional_dict=unconditional_dict,
                clean_latent=clean_latent
            )

            self.generator_optimizer.zero_grad()
            generator_loss.backward()
            generator_grad_norm = self.distillation_model.generator.clip_grad_norm_(
                self.max_grad_norm)
            self.generator_optimizer.step()
        else:
            generator_log_dict = {}
        # Critic (fake score) update, taken every step.
        critic_loss, critic_log_dict = self.distillation_model.critic_loss(
            image_or_video_shape=image_or_video_shape,
            conditional_dict=conditional_dict,
            unconditional_dict=unconditional_dict,
            clean_latent=clean_latent
        )

        self.critic_optimizer.zero_grad()
        critic_loss.backward()
        critic_grad_norm = self.distillation_model.fake_score.clip_grad_norm_(
            self.max_grad_norm)
        self.critic_optimizer.step()
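
        # The logging/visualization code that belongs here is missing from
        # this section. Below is a minimal sketch, assuming wandb was
        # initialized elsewhere; the metric names are assumptions, the
        # generator/critic log dicts are left unreported, and the VISUALIZE
        # path (presumably decoding latents with self.distillation_model.vae
        # and uploading via prepare_for_saving) is omitted as unrecoverable.
        if self.is_main_process and wandb.run is not None:
            loss_dict = {
                "critic_loss": critic_loss.item(),
                "critic_grad_norm": critic_grad_norm.item(),
            }
            if TRAIN_GENERATOR:
                loss_dict.update({
                    "generator_loss": generator_loss.item(),
                    "generator_grad_norm": generator_grad_norm.item(),
                })
            wandb.log(loss_dict, step=self.step)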

    def train(self):
        while True:
            self.train_one_step()
            if (not self.config.no_save) and self.step % self.config.log_iters == 0:
                self.save()
                torch.cuda.empty_cache()

            barrier()
            if self.is_main_process:
                current_time = time.time()
                if self.previous_time is None:
                    self.previous_time = current_time
                else:
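                    # The body of this branch is missing from this section; a
                    # minimal sketch, assuming per-iteration wall-clock time
                    # was logged (the metric name is an assumption).
                    if wandb.run is not None:
                        wandb.log({"per_iteration_time": current_time - self.previous_time},
                                  step=self.step)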
                    self.previous_time = current_time

            self.step += 1


def main():
    parser = argparse.ArgumentParser()
    parser.add_argument("--config_path", type=str, required=True)
    parser.add_argument("--local_rank", type=int, default=-1)
    parser.add_argument("--no_save", action="store_true")
    parser.add_argument("--no_visualize", action="store_true")

    args = parser.parse_args()

    config = OmegaConf.load(args.config_path)
    config.no_save = args.no_save
    config.no_visualize = args.no_visualize

    trainer = Trainer(config)
    trainer.train()
if __name__ == "__main__": |
|
|
main() |
|
|
|