diff --git a/torchrl/modules/models/models.py b/torchrl/modules/models/models.py index 49c0b0961ab..cad4065f54a 100644 --- a/torchrl/modules/models/models.py +++ b/torchrl/modules/models/models.py @@ -955,7 +955,7 @@ class DuelingCnnDQNet(nn.Module): >>> cnn_kwargs = { ... 'num_cells': [32, 64, 64], ... 'strides': [4, 2, 1], - ... 'kernels': [8, 4, 3], + ... 'kernel_sizes': [8, 4, 3], ... } mlp_kwargs (dict or list of dicts, optional): kwargs for the advantage diff --git a/tutorials/sphinx-tutorials/getting-started-3.py b/tutorials/sphinx-tutorials/getting-started-3.py index 594cb7392c0..70ffe37a005 100644 --- a/tutorials/sphinx-tutorials/getting-started-3.py +++ b/tutorials/sphinx-tutorials/getting-started-3.py @@ -152,7 +152,7 @@ indices = buffer.extend(data) ################################# -# We can check that the buffer now has the same number of elements than what +# We can check that the buffer now has the same number of elements as what # we got from the collector: assert len(buffer) == collector.frames_per_batch @@ -174,7 +174,7 @@ # Next steps # ---------- # -# - You can have look at other multirpocessed +# - You can have a look at other multiprocessed # collectors such as :class:`~torchrl.collectors.collectors.MultiSyncDataCollector` or # :class:`~torchrl.collectors.collectors.MultiaSyncDataCollector`. # - TorchRL also offers distributed collectors if you have multiple nodes to diff --git a/tutorials/sphinx-tutorials/rb_tutorial.py b/tutorials/sphinx-tutorials/rb_tutorial.py index 4f5ecb4936d..2a852f0e364 100644 --- a/tutorials/sphinx-tutorials/rb_tutorial.py +++ b/tutorials/sphinx-tutorials/rb_tutorial.py @@ -168,7 +168,7 @@ buffer_lazytensor = ReplayBuffer(storage=LazyTensorStorage(size)) ###################################################################### -# Let us create a batch of data of size ``torch.Size([3])` with 2 tensors +# Let us create a batch of data of size ``torch.Size([3])`` with 2 tensors # stored in it: #