| | """ |
| | Train a diffusion model on images. |
| | """ |
import argparse
import json
import os
import random
import sys
import traceback
from pathlib import Path

import cv2
import imageio
import numpy as np
import torch as th
import torch.distributed as dist
import torch.multiprocessing as mp
from tqdm import tqdm

sys.path.append('.')

import dnnlib
from guided_diffusion import dist_util, logger
from guided_diffusion.script_util import (
    args_to_dict,
    add_dict_to_argparser,
)

from nsr.train_nv_util import TrainLoop3DRecNV, TrainLoop3DRec, TrainLoop3DRecNVPatch
from nsr.script_util import create_3DAE_model, encoder_and_nsr_defaults, loss_defaults, rendering_options_defaults, eg3d_options_default
from nsr.losses.builder import E3DGELossClass
from datasets.eg3d_dataset import LMDBDataset_MV_Compressed_eg3d
from dnnlib.util import EasyDict, InfiniteSampler

from pdb import set_trace as st


def training_loop(args):

    # Initialize the torch.distributed process group / per-rank device for this process.
    dist_util.setup_dist(args)

    # Anomaly detection is expensive; keep it off unless chasing NaN/Inf gradients.
    th.autograd.set_detect_anomaly(False)

    SEED = args.seed

    th.cuda.set_device(args.local_rank)
    th.cuda.empty_cache()

    # Seed every RNG the pipeline touches so runs are reproducible per seed.
    th.manual_seed(SEED)
    th.cuda.manual_seed_all(SEED)
    np.random.seed(SEED)
    random.seed(SEED)

    logger.configure(dir=args.logdir)
| | logger.log("creating encoder and NSR decoder...") |
| | |
| | |
| |
|
| | |
| | opts = eg3d_options_default() |
| |
|
| | if args.sr_training: |
| | args.sr_kwargs = dnnlib.EasyDict( |
| | channel_base=opts.cbase, |
| | channel_max=opts.cmax, |
| | fused_modconv_default='inference_only', |
| | use_noise=True |
| | ) |
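    # sr_kwargs mirrors the EG3D-style super-resolution settings: channel_base / channel_max
    # come from eg3d_options_default(), and fused_modconv_default='inference_only' is the
    # StyleGAN convention that skips fused modulated conv during training to save memory.
    # These kwargs are presumably consumed by create_3DAE_model() when the SR head is built.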

    logger.log("creating data loader...")

    if args.objv_dataset:
        from datasets.g_buffer_objaverse import load_data, load_dataset, load_eval_data, load_memory_data
    else:
        from datasets.shapenet import load_data, load_eval_data, load_memory_data, load_dataset
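    # Both backends (Objaverse G-buffer renderings vs. ShapeNet) expose the same loader
    # factory functions, so everything below stays dataset-agnostic.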

    if args.overfitting:
        # Keep a small split entirely in memory for overfitting / debugging runs.
        data = load_memory_data(
            file_path=args.data_dir,
            batch_size=args.batch_size,
            reso=args.image_size,
            reso_encoder=args.image_size_encoder,
            num_workers=args.num_workers,
            load_depth=True)
    else:
        if args.cfg in ['ffhq']:
            training_set = LMDBDataset_MV_Compressed_eg3d(
                args.data_dir,
                args.image_size,
                args.image_size_encoder,
            )
            # Seeded, rank-sharded sampling so every GPU draws a disjoint stream of samples.
            training_set_sampler = InfiniteSampler(
                dataset=training_set,
                rank=dist_util.get_rank(),
                num_replicas=dist_util.get_world_size(),
                seed=SEED)

            data = iter(
                th.utils.data.DataLoader(
                    dataset=training_set,
                    sampler=training_set_sampler,
                    batch_size=args.batch_size,
                    pin_memory=True,
                    num_workers=args.num_workers,
                    persistent_workers=args.num_workers > 0,
                    prefetch_factor=max(8 // args.batch_size, 2),
                ))
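            # InfiniteSampler yields an endless, seeded permutation sharded across ranks, so
            # this iterator never raises StopIteration; persistent_workers plus the
            # prefetch_factor heuristic (at least 2, more for tiny batches) keep loader
            # workers warm between steps.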

        else:
            loader = load_dataset(
                file_path=args.data_dir,
                batch_size=args.batch_size,
                reso=args.image_size,
                reso_encoder=args.image_size_encoder,
                num_workers=args.num_workers,
                load_depth=True,
                preprocess=None,
                dataset_size=args.dataset_size,
                trainer_name=args.trainer_name,
                use_lmdb=args.use_lmdb,
                infi_sampler=False,
            )
            if args.pose_warm_up_iter > 0:
                # Small in-memory split used only while the pose branch warms up.
                overfitting_dataset = load_memory_data(
                    file_path=args.data_dir,
                    batch_size=args.batch_size,
                    reso=args.image_size,
                    reso_encoder=args.image_size_encoder,
                    num_workers=args.num_workers,
                    load_depth=True)
                data = [loader, overfitting_dataset, args.pose_warm_up_iter]
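                # Convention assumed from the trainer side: the [main_loader, warm_up_data,
                # warm_up_iters] triple lets the training loop draw from the in-memory split
                # for the first pose_warm_up_iter iterations before switching to the full
                # dataset.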

    args.img_size = [args.image_size_encoder]

    # Barrier: every rank must finish building its dataset before training starts.
    dist_util.synchronize()

    # Loss-related hyperparameters only (keys from loss_defaults()).
    opt = dnnlib.EasyDict(args_to_dict(args, loss_defaults().keys()))

    logger.log("training...")

    # Iterate the loader once as a data-pipeline smoke test; no model is built or trained
    # in this file. Note: `loader` only exists on the load_dataset() path above; the
    # overfitting / ffhq branches build `data` instead.
    number = 0
    for idx, batch in enumerate(tqdm(loader)):
        number += 1
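        # Example of an inspection step (hypothetical: batch key names depend on the
        # dataset class, so it is left commented out):
        # if idx == 0 and dist_util.get_rank() == 0:
        #     shapes = {k: tuple(v.shape) for k, v in batch.items() if th.is_tensor(v)}
        #     logger.log(f'first batch tensor shapes: {shapes}')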


def create_argparser(**kwargs):
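    """Build the CLI parser.

    The defaults below are merged with encoder_and_nsr_defaults() and loss_defaults(), so
    every model and loss hyperparameter is also exposed as a command-line flag.
    """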
    defaults = dict(
        seed=0,
        dataset_size=-1,  # -1: use the full dataset
        trainer_name='input_rec',
        use_amp=False,
        overfitting=False,
        num_workers=4,
        image_size=128,  # rendering / supervision resolution
        image_size_encoder=224,  # encoder input resolution
        iterations=150000,
        anneal_lr=False,
        lr=5e-5,
        weight_decay=0.0,
        lr_anneal_steps=0,
        batch_size=1,
        eval_batch_size=12,
        microbatch=-1,  # <= 0: no gradient accumulation, use the full batch
        ema_rate="0.9999",  # comma-separated list of EMA decay rates
        log_interval=50,
        eval_interval=2500,
        save_interval=10000,
        resume_checkpoint="",
        use_fp16=False,
        fp16_scale_growth=1e-3,
        data_dir="",
        eval_data_dir="",
        logdir="/mnt/lustre/yslan/logs/nips23/",
        pose_warm_up_iter=-1,
        use_lmdb=False,
        objv_dataset=False,
    )

    defaults.update(encoder_and_nsr_defaults())  # model (encoder + NSR decoder) hyperparameters
    defaults.update(loss_defaults())  # loss hyperparameters

    parser = argparse.ArgumentParser()
    add_dict_to_argparser(parser, defaults)

    return parser


if __name__ == "__main__":

    args = create_argparser().parse_args()
    # LOCAL_RANK is set by the distributed launcher (e.g. torchrun); the script is not meant
    # to be run as a bare, single-process `python` invocation.
    args.local_rank = int(os.environ["LOCAL_RANK"])
    args.gpus = th.cuda.device_count()

    opts = args
    args.rendering_kwargs = rendering_options_defaults(opts)

    # Dump the full config next to the logs for reproducibility; create the log dir first
    # so the dump does not fail on a fresh run.
    os.makedirs(args.logdir, exist_ok=True)
    with open(os.path.join(args.logdir, 'args.json'), 'w') as f:
        json.dump(vars(args), f, indent=2)

    print('Launching processes...')

    try:
        training_loop(args)
    except Exception as e:
        # Print the full traceback before tearing down the process group so the error is
        # visible in the per-rank logs.
        traceback.print_exc()
        dist_util.cleanup()
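
# Example single-node launch (illustrative only; the script filename and paths are
# placeholders, not values defined in this repo):
#   torchrun --nproc_per_node=8 train_rec.py \
#       --data_dir /path/to/dataset --logdir /path/to/logs \
#       --batch_size 4 --num_workers 4 --trainer_name input_rec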