From dd3fb52cf24b485d1ff6d1195ecc037f873edcad Mon Sep 17 00:00:00 2001
From: Jesse Engel
Date: Wed, 15 Jul 2020 11:33:50 -0700
Subject: [PATCH] Default TPU optimization to more common values

Default the TPU optimization to `learning_rate=3e-4` and `batch_size=64`;
the old defaults were a common source of errors. This changes the behavior
of NSynth training from the original ICLR 2020 paper on TPU, but the
original values can be restored manually (`learning_rate=1e-5`,
`batch_size=128`).

PiperOrigin-RevId: 321403423
---
 ddsp/training/gin/optimization/base_tpu.gin     | 3 +--
 ddsp/training/gin/papers/iclr2020/nsynth_ae.gin | 4 ++++
 ddsp/version.py                                 | 2 +-
 3 files changed, 6 insertions(+), 3 deletions(-)

diff --git a/ddsp/training/gin/optimization/base_tpu.gin b/ddsp/training/gin/optimization/base_tpu.gin
index 10536b5c..e66bf38c 100644
--- a/ddsp/training/gin/optimization/base_tpu.gin
+++ b/ddsp/training/gin/optimization/base_tpu.gin
@@ -2,5 +2,4 @@
 include 'optimization/base.gin'
 
 # Larger batch size for TPU.
-learning_rate = 1e-5
-batch_size = 128 # (4x2, 8 per a core)
+batch_size = 64 # (4x2, 4 per core)
diff --git a/ddsp/training/gin/papers/iclr2020/nsynth_ae.gin b/ddsp/training/gin/papers/iclr2020/nsynth_ae.gin
index 9f5be138..0f69c360 100644
--- a/ddsp/training/gin/papers/iclr2020/nsynth_ae.gin
+++ b/ddsp/training/gin/papers/iclr2020/nsynth_ae.gin
@@ -1,3 +1,7 @@
 # -*-Python-*-
 include 'models/ae.gin'
 include 'datasets/nsynth.gin'
+
+# To recreate the original experiment's optimization params, uncomment the lines below.
+# learning_rate = 1e-5
+# batch_size = 128
diff --git a/ddsp/version.py b/ddsp/version.py
index 50c36413..24788201 100644
--- a/ddsp/version.py
+++ b/ddsp/version.py
@@ -19,4 +19,4 @@
 pulling in all the dependencies in __init__.py.
 """
 
-__version__ = '0.7.0'
+__version__ = '0.8.0'
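
Note: besides uncommenting the two lines added to nsynth_ae.gin above, the
pre-change values can be restored at startup with the gin-config Python API.
A minimal sketch under assumptions: the config-file path shown is illustrative
and must be resolvable on the gin search path.

    import gin

    # Bind the `learning_rate` and `batch_size` macros on top of the paper
    # config to recover the original ICLR 2020 optimization settings.
    gin.parse_config_files_and_bindings(
        config_files=['papers/iclr2020/nsynth_ae.gin'],
        bindings=['learning_rate = 1e-5', 'batch_size = 128'])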