
Commit 599047e

Merge pull request #97 from jdb78/maintenance/pytorch-lightning-1.0.0rc4
Rename LearningRateLogger and reposition EarlyStopping callback for lightning 1.0.0rc4

2 parents: c07d5ac + 9eeafb0
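
PyTorch Lightning 1.0.0rc4 renames the LearningRateLogger callback to LearningRateMonitor and removes the Trainer's dedicated early_stop_callback argument; every callback now goes through the generic callbacks list. A minimal before/after sketch of the pattern this commit applies across the repository (the EarlyStopping settings mirror the README; the rest is illustrative):

```python
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor

early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")

# Before (pytorch-lightning < 1.0):
#   from pytorch_lightning.callbacks import LearningRateLogger
#   lr_logger = LearningRateLogger()
#   trainer = pl.Trainer(early_stop_callback=early_stop_callback, callbacks=[lr_logger])

# After (pytorch-lightning 1.0.0rc4): renamed callback, one callbacks list for everything
lr_logger = LearningRateMonitor()
trainer = pl.Trainer(callbacks=[lr_logger, early_stop_callback])
```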

File tree: 10 files changed (+43, -33 lines)

README.md
Lines changed: 3 additions & 4 deletions

@@ -54,7 +54,7 @@ documentation with detailed tutorials.
 
 ```python
 import pytorch_lightning as pl
-from pytorch_lightning.callbacks import EarlyStopping
+from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
 
 from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer
@@ -89,14 +89,13 @@ val_dataloader = validation.to_dataloader(train=False, batch_size=batch_size, nu
 
 
 early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")
-lr_logger = LearningRateLogger()
+lr_logger = LearningRateMonitor()
 trainer = pl.Trainer(
     max_epochs=100,
     gpus=0,
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
     limit_train_batches=30,
-    callbacks=[lr_logger],
+    callbacks=[lr_logger, early_stop_callback],
 )
 
 
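Reassembled from the two hunks above, the README's trainer setup now reads roughly as follows (the dataset and dataloader construction comes from unchanged README lines not shown in this diff):

```python
import pytorch_lightning as pl
from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor

from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer

early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")
lr_logger = LearningRateMonitor()
trainer = pl.Trainer(
    max_epochs=100,
    gpus=0,
    gradient_clip_val=0.1,
    limit_train_batches=30,
    callbacks=[lr_logger, early_stop_callback],
)
```

If per-step rather than per-epoch learning rates are wanted, LearningRateMonitor also takes a logging_interval argument in Lightning 1.0; the README keeps the default.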
docs/source/getting-started.rst
Lines changed: 3 additions & 4 deletions

@@ -62,7 +62,7 @@ Example
 .. code-block:: python
 
     import pytorch_lightning as pl
-    from pytorch_lightning.callbacks import EarlyStopping
+    from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
 
     from pytorch_forecasting import TimeSeriesDataSet, TemporalFusionTransformer
 
@@ -98,14 +98,13 @@ Example
 
     # define trainer with early stopping
     early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=1, verbose=False, mode="min")
-    lr_logger = LearningRateLogger()
+    lr_logger = LearningRateMonitor()
     trainer = pl.Trainer(
         max_epochs=100,
         gpus=0,
         gradient_clip_val=0.1,
-        early_stop_callback=early_stop_callback,
         limit_train_batches=30,
-        callbacks=[lr_logger],
+        callbacks=[lr_logger, early_stop_callback],
     )
 
     # create the model

docs/source/tutorials/ar.ipynb
Lines changed: 2 additions & 2 deletions

@@ -723,7 +723,7 @@
     "    gpus=0,\n",
     "    weights_summary=\"top\",\n",
     "    gradient_clip_val=0.1,\n",
-    "    early_stop_callback=early_stop_callback,\n",
+    "    callbacks=[early_stop_callback],\n",
     "    limit_train_batches=30,\n",
     ")\n",
     "\n",
@@ -1097,7 +1097,7 @@
    "name": "python",
    "nbconvert_exporter": "python",
    "pygments_lexer": "ipython3",
-   "version": "3.7.7"
+   "version": "3.8.3"
   }
  },
  "nbformat": 4,

docs/source/tutorials/stallion.ipynb
Lines changed: 19 additions & 7 deletions (large diff, not rendered by default)

examples/ar.py
Lines changed: 3 additions & 4 deletions

@@ -6,7 +6,7 @@
 import pandas as pd
 from pandas.core.common import SettingWithCopyWarning
 import pytorch_lightning as pl
-from pytorch_lightning.callbacks import EarlyStopping, LearningRateLogger
+from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger
 import torch
 
@@ -64,20 +64,19 @@
 validation.save("validation.pkl")
 
 early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=5, verbose=False, mode="min")
-lr_logger = LearningRateLogger()
+lr_logger = LearningRateMonitor()
 
 trainer = pl.Trainer(
     max_epochs=100,
     gpus=0,
     weights_summary="top",
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
     limit_train_batches=30,
     limit_val_batches=3,
     # fast_dev_run=True,
     # logger=logger,
     # profiler=True,
-    callbacks=[lr_logger],
+    callbacks=[lr_logger, early_stop_callback],
 )
 
examples/nbeats.py
Lines changed: 1 addition & 1 deletion

@@ -55,7 +55,7 @@
     gpus=0,
     weights_summary="top",
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
+    callbacks=[early_stop_callback],
     limit_train_batches=15,
     # limit_val_batches=1,
     # fast_dev_run=True,

examples/stallion.py
Lines changed: 3 additions & 4 deletions

@@ -6,7 +6,7 @@
 import pandas as pd
 from pandas.core.common import SettingWithCopyWarning
 import pytorch_lightning as pl
-from pytorch_lightning.callbacks import EarlyStopping, LearningRateLogger
+from pytorch_lightning.callbacks import EarlyStopping, LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger
 import torch
 
@@ -93,21 +93,20 @@
 validation.save("validation.pkl")
 
 early_stop_callback = EarlyStopping(monitor="val_loss", min_delta=1e-4, patience=10, verbose=False, mode="min")
-lr_logger = LearningRateLogger()
+lr_logger = LearningRateMonitor()
 
 trainer = pl.Trainer(
     max_epochs=100,
     gpus=0,
     weights_summary="top",
     gradient_clip_val=0.1,
-    early_stop_callback=early_stop_callback,
     limit_train_batches=30,
     # val_check_interval=20,
     # limit_val_batches=1,
     # fast_dev_run=True,
     # logger=logger,
     # profiler=True,
-    callbacks=[lr_logger],
+    callbacks=[lr_logger, early_stop_callback],
 )
 

pytorch_forecasting/models/temporal_fusion_transformer/tuning.py
Lines changed: 7 additions & 4 deletions

@@ -9,7 +9,7 @@
 from optuna.integration import PyTorchLightningPruningCallback, TensorBoardCallback
 import pytorch_lightning as pl
 from pytorch_lightning import Callback
-from pytorch_lightning.callbacks import LearningRateLogger
+from pytorch_lightning.callbacks import LearningRateMonitor
 from pytorch_lightning.loggers import TensorBoardLogger
 import statsmodels.api as sm
 import torch
@@ -99,16 +99,19 @@ def objective(trial: optuna.Trial) -> float:
     # TensorBoard. We don't use any logger here as it requires us to implement several abstract
     # methods. Instead we setup a simple callback, that saves metrics from each validation step.
     metrics_callback = MetricsCallback()
-    learning_rate_callback = LearningRateLogger()
+    learning_rate_callback = LearningRateMonitor()
     logger = TensorBoardLogger(log_dir, name="optuna", version=trial.number)
     gradient_clip_val = trial.suggest_loguniform("gradient_clip_val", *gradient_clip_val_range)
     trainer = pl.Trainer(
         checkpoint_callback=checkpoint_callback,
         max_epochs=max_epochs,
         gradient_clip_val=gradient_clip_val,
         gpus=[0] if torch.cuda.is_available() else None,
-        callbacks=[metrics_callback, learning_rate_callback],
-        early_stop_callback=PyTorchLightningPruningCallback(trial, monitor="val_loss"),
+        callbacks=[
+            metrics_callback,
+            learning_rate_callback,
+            PyTorchLightningPruningCallback(trial, monitor="val_loss"),
+        ],
         logger=logger,
         **trainer_kwargs,
     )
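
tuning.py applies the same migration inside an Optuna objective: the pruning callback, previously passed through early_stop_callback, becomes an ordinary entry in callbacks. A condensed sketch of the wiring, using a hypothetical make_trainer helper in place of the full objective (the search-space bounds and max_epochs default are illustrative, not from the diff):

```python
import optuna
from optuna.integration import PyTorchLightningPruningCallback
import pytorch_lightning as pl
from pytorch_lightning.callbacks import LearningRateMonitor


def make_trainer(trial: optuna.Trial, max_epochs: int = 10) -> pl.Trainer:
    # hypothetical helper; in tuning.py this wiring lives inside objective()
    return pl.Trainer(
        max_epochs=max_epochs,
        gradient_clip_val=trial.suggest_loguniform("gradient_clip_val", 0.01, 1.0),
        callbacks=[
            LearningRateMonitor(),  # renamed from LearningRateLogger
            # reports "val_loss" to Optuna after each validation and may prune the trial
            PyTorchLightningPruningCallback(trial, monitor="val_loss"),
        ],
    )
```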

tests/test_models/test_nbeats.py
Lines changed: 1 addition & 2 deletions

@@ -4,7 +4,6 @@
 from pytorch_lightning.callbacks import EarlyStopping, ModelCheckpoint
 from pytorch_lightning.loggers import TensorBoardLogger
 
-from pytorch_forecasting.metrics import QuantileLoss
 from pytorch_forecasting.models import NBeats
 
 
@@ -21,7 +20,7 @@ def test_integration(dataloaders_fixed_window_without_coveratiates, tmp_path, gpus):
         gpus=gpus,
         weights_summary="top",
         gradient_clip_val=0.1,
-        early_stop_callback=early_stop_callback,
+        callbacks=[early_stop_callback],
         fast_dev_run=True,
         logger=logger,
     )

tests/test_models/test_temporal_fusion_transformer.py
Lines changed: 1 addition & 1 deletion

@@ -40,7 +40,7 @@ def test_integration(multiple_dataloaders_with_coveratiates, tmp_path, gpus):
         gpus=gpus,
         weights_summary="top",
         gradient_clip_val=0.1,
-        early_stop_callback=early_stop_callback,
+        callbacks=[early_stop_callback],
         fast_dev_run=True,
         logger=logger,
     )
