Skip to content

Commit

Permalink
Fewer eval steps during training
Browse files Browse the repository at this point in the history
  • Loading branch information
anton-l committed Jul 21, 2022
1 parent 1345700 commit 06505ba
Showing 1 changed file with 9 additions and 6 deletions.
15 changes: 9 additions & 6 deletions examples/train_unconditional.py
Original file line number Diff line number Diff line change
Expand Up @@ -147,9 +147,9 @@ def transforms(examples):

accelerator.wait_for_everyone()

# Generate a sample image for visual inspection
# Generate sample images for visual inspection
if accelerator.is_main_process:
with torch.no_grad():
if epoch % args.save_images_epochs == 0 or epoch == args.num_epochs - 1:
pipeline = DDPMPipeline(
unet=accelerator.unwrap_model(ema_model.averaged_model if args.use_ema else model),
scheduler=noise_scheduler,
Expand All @@ -159,9 +159,11 @@ def transforms(examples):
# run pipeline in inference (sample random noise and denoise)
images = pipeline(generator=generator, batch_size=args.eval_batch_size, output_type="numpy")["sample"]

# denormalize the images and save to tensorboard
images_processed = (images * 255).round().astype("uint8")
accelerator.trackers[0].writer.add_images("test_samples", images_processed.transpose(0, 3, 1, 2), epoch)
# denormalize the images and save to tensorboard
images_processed = (images * 255).round().astype("uint8")
accelerator.trackers[0].writer.add_images(
"test_samples", images_processed.transpose(0, 3, 1, 2), epoch
)

if epoch % args.save_model_epochs == 0 or epoch == args.num_epochs - 1:
# save the model
Expand All @@ -184,7 +186,8 @@ def transforms(examples):
parser.add_argument("--train_batch_size", type=int, default=16)
parser.add_argument("--eval_batch_size", type=int, default=16)
parser.add_argument("--num_epochs", type=int, default=100)
parser.add_argument("--save_model_epochs", type=int, default=5)
parser.add_argument("--save_images_epochs", type=int, default=10)
parser.add_argument("--save_model_epochs", type=int, default=10)
parser.add_argument("--gradient_accumulation_steps", type=int, default=1)
parser.add_argument("--learning_rate", type=float, default=1e-4)
parser.add_argument("--lr_scheduler", type=str, default="cosine")
Expand Down

0 comments on commit 06505ba

Please sign in to comment.