@@ -9,7 +9,7 @@
 from optuna.integration import PyTorchLightningPruningCallback, TensorBoardCallback
 import pytorch_lightning as pl
 from pytorch_lightning import Callback
-from pytorch_lightning.callbacks import LearningRateLogger
+from pytorch_lightning.callbacks import LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger
 import statsmodels.api as sm
 import torch
@@ -99,16 +99,19 @@ def objective(trial: optuna.Trial) -> float: |
     # TensorBoard. We don't use any logger here as it requires us to implement several abstract
     # methods. Instead we setup a simple callback, that saves metrics from each validation step.
     metrics_callback = MetricsCallback()
-    learning_rate_callback = LearningRateLogger()
+    learning_rate_callback = LearningRateMonitor()
     logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
     gradient_clip_val = trial.suggest_loguniform("gradient_clip_val", *gradient_clip_val_range)
     trainer = pl.Trainer(
         checkpoint_callback=checkpoint_callback,
         max_epochs=max_epochs,
         gradient_clip_val=gradient_clip_val,
         gpus=[0] if torch.cuda.is_available() else None,
-        callbacks=[metrics_callback, learning_rate_callback],
-        early_stop_callback=PyTorchLightningPruningCallback(trial, monitor="val_loss"),
+        callbacks=[
+            metrics_callback,
+            learning_rate_callback,
+            PyTorchLightningPruningCallback(trial, monitor="val_loss"),
+        ],
         logger=logger,
         **trainer_kwargs,
     )
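In short, this commit tracks two PyTorch Lightning API changes: `LearningRateLogger` was renamed to `LearningRateMonitor` in Lightning 1.0, and the Optuna pruning callback is now passed through the regular `callbacks` list rather than the `early_stop_callback` argument, which newer Lightning versions no longer accept. A minimal sketch of the migrated setup, assuming `pytorch_lightning >= 1.0` and Optuna with the PyTorch Lightning integration installed; the model and data are omitted and a placeholder objective value is returned:

```python
import optuna
import pytorch_lightning as pl
from optuna.integration import PyTorchLightningPruningCallback
from pytorch_lightning.callbacks import LearningRateMonitor


def objective(trial: optuna.Trial) -> float:
    trainer = pl.Trainer(
        max_epochs=1,
        # LearningRateLogger was renamed to LearningRateMonitor in Lightning 1.0;
        # the pruning callback now goes into `callbacks` instead of the removed
        # `early_stop_callback` argument.
        callbacks=[
            LearningRateMonitor(),
            PyTorchLightningPruningCallback(trial, monitor="val_loss"),
        ],
    )
    # trainer.fit(model, datamodule=...)  # model/data omitted in this sketch
    return trial.suggest_float("x", 0.0, 1.0)  # placeholder objective value


study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=1)
```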
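For reference, the `MetricsCallback` instantiated above is defined earlier in the example file and does not appear in this diff. A minimal sketch of such a callback, assuming it only records `trainer.callback_metrics` after each validation run:

```python
import pytorch_lightning as pl
from pytorch_lightning import Callback


class MetricsCallback(Callback):
    """Collects metrics from each validation epoch in a plain list (sketch only;
    the real definition lives earlier in the file)."""

    def __init__(self):
        super().__init__()
        self.metrics = []

    def on_validation_end(self, trainer: pl.Trainer, pl_module: pl.LightningModule) -> None:
        # callback_metrics holds everything logged during validation,
        # including the "val_loss" that the pruning callback monitors.
        self.metrics.append(trainer.callback_metrics)
```

As the in-file comment notes, this avoids implementing the abstract methods of a full Lightning logger while still exposing per-epoch metrics to Optuna.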