[Feature] adding tensor classes annotation for loss functions #1905
base: main
@@ -11,12 +11,12 @@
 from typing import Tuple

 import torch
-from tensordict import TensorDict, TensorDictBase
+from tensordict import tensorclass, TensorDict, TensorDictBase
 from tensordict.nn import dispatch, ProbabilisticTensorDictSequential, TensorDictModule
 from tensordict.utils import NestedKey
 from torch import distributions as d

-from torchrl.objectives.common import LossModule
+from torchrl.objectives.common import LossContainerBase, LossModule

 from torchrl.objectives.utils import (
     _cache_values,
@@ -36,6 +36,17 @@
 )


+@tensorclass
+class A2CLosses(LossContainerBase):
+    """The tensorclass for The A2CLoss Loss class."""
+
+    loss_actor: torch.Tensor
+    loss_objective: torch.Tensor
+    loss_critic: torch.Tensor | None = None
+    loss_entropy: torch.Tensor | None = None
+    entropy: torch.Tensor | None = None
+
+
 class A2CLoss(LossModule):
     """TorchRL implementation of the A2C loss.
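For readers unfamiliar with tensordict's tensorclass: the decorator turns a dataclass-style declaration into a tensor container with TensorDict semantics, which is what the new A2CLosses container relies on. A minimal, self-contained sketch of the pattern (the ToyLosses class and its fields are illustrative, not part of the PR):

    import torch
    from tensordict import tensorclass


    @tensorclass
    class ToyLosses:
        # Illustrative fields mirroring the A2CLosses container above.
        loss_objective: torch.Tensor
        loss_critic: torch.Tensor | None = None


    # Like a TensorDict, a tensorclass instance carries an explicit batch_size.
    losses = ToyLosses(
        loss_objective=torch.tensor(0.5),
        loss_critic=torch.tensor(0.1),
        batch_size=[],
    )
    # Fields are typed attributes rather than string keys.
    total = losses.loss_objective + losses.loss_critic
    print(total)  # tensor(0.6000)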
@@ -137,6 +148,16 @@ class A2CLoss(LossModule):
         batch_size=torch.Size([]),
         device=None,
         is_shared=False)
+    >>> loss = A2CLoss(actor, value, loss_critic_type="l2", return_tensorclass=True)
+    >>> loss(data)
+    A2CLosses(
+        entropy=Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+        loss_critic=Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+        loss_entropy=Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+        loss_objective=Tensor(shape=torch.Size([]), device=cpu, dtype=torch.float32, is_shared=False),
+        batch_size=torch.Size([]),
+        device=None,
+        is_shared=False)

     This class is compatible with non-tensordict based modules too and can be
     used without recurring to any tensordict-related primitive. In this case,
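As a usage note (an assumption about intended call sites, reusing the actor, value and data objects set up earlier in the docstring example rather than redefining them here): the flag only changes the container type, so downstream code switches from key lookup to attribute access:

    # Default: losses come back as a TensorDict keyed by strings.
    td_losses = A2CLoss(actor, value, loss_critic_type="l2")(data)
    objective = td_losses["loss_objective"]

    # With return_tensorclass=True: the same values, exposed as typed attributes.
    tc_losses = A2CLoss(actor, value, loss_critic_type="l2", return_tensorclass=True)(data)
    objective = tc_losses.loss_objective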
@@ -182,7 +203,7 @@ class A2CLoss(LossModule):
         method.

     Examples:
-        >>> loss.select_out_keys('loss_objective', 'loss_critic')
+        >>> _ = loss.select_out_keys('loss_objective', 'loss_critic')
         >>> loss_obj, loss_critic = loss(
         ...     observation = torch.randn(*batch, n_obs),
         ...     action = spec.rand(batch),
@@ -248,6 +269,7 @@ def __init__(
         functional: bool = True,
         actor: ProbabilisticTensorDictSequential = None,
         critic: ProbabilisticTensorDictSequential = None,
+        return_tensorclass: bool = False,
Review thread on this line:
    "Should be added to the docstrings."
    "Working on it."
    "@vmoens added doctests for the tensorclass changes, but I see some doctest issues and blockers. Can you please help me resolve them?"
         reduction: str = None,
         clip_value: float | None = None,
     ):
@@ -309,6 +331,21 @@ def __init__(
         if gamma is not None:
             raise TypeError(_GAMMA_LMBDA_DEPREC_ERROR)
         self.loss_critic_type = loss_critic_type
+        self.return_tensorclass = return_tensorclass
+
+        if clip_value is not None:
+            if isinstance(clip_value, float):
+                clip_value = torch.tensor(clip_value)
+            elif isinstance(clip_value, torch.Tensor):
+                if clip_value.numel() != 1:
+                    raise ValueError(
+                        f"clip_value must be a float or a scalar tensor, got {clip_value}."
+                    )
+            else:
+                raise ValueError(
+                    f"clip_value must be a float or a scalar tensor, got {clip_value}."
+                )
+            self.register_buffer("clip_value", clip_value)
         if clip_value is not None:
             if isinstance(clip_value, float):
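The clip_value handling added above follows a common PyTorch pattern: coerce a scalar hyperparameter to a tensor and register it as a buffer so it moves with the module across devices and is saved in the state_dict. A standalone sketch of that pattern (the ClipDemo module is illustrative, not the PR's code):

    import torch
    from torch import nn


    class ClipDemo(nn.Module):
        """Toy module showing the float-or-scalar-tensor coercion used above."""

        def __init__(self, clip_value: float | torch.Tensor | None = None):
            super().__init__()
            if clip_value is None:
                self.clip_value = None
                return
            if isinstance(clip_value, float):
                clip_value = torch.tensor(clip_value)
            elif not (isinstance(clip_value, torch.Tensor) and clip_value.numel() == 1):
                raise ValueError(f"clip_value must be a float or a scalar tensor, got {clip_value}.")
            # A buffer follows .to(device) and serialization, but is not a learnable parameter.
            self.register_buffer("clip_value", clip_value)


    print(ClipDemo(0.2).clip_value)  # tensor(0.2000)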
@@ -502,7 +539,7 @@ def _cached_detach_critic_network_params(self):
         return self.critic_network_params.detach()

     @dispatch()
-    def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
+    def forward(self, tensordict: TensorDictBase) -> A2CLosses | TensorDictBase:
         tensordict = tensordict.clone(False)
         advantage = tensordict.get(self.tensor_keys.advantage, None)
         if advantage is None:
@@ -523,6 +560,10 @@ def forward(self, tensordict: TensorDictBase) -> TensorDictBase:
         if self.critic_coef:
             loss_critic, value_clip_fraction = self.loss_critic(tensordict)
             td_out.set("loss_critic", loss_critic)
+            if self.return_tensorclass:
+                return A2CLosses._from_tensordict(td_out)
+            loss_critic, value_clip_fraction = self.loss_critic(tensordict)
+            td_out.set("loss_critic", loss_critic)
             if value_clip_fraction is not None:
                 td_out.set("value_clip_fraction", value_clip_fraction)
         td_out = td_out.named_apply(
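For context on the conversion used in this hunk: every @tensorclass gets a generated _from_tensordict classmethod that wraps an existing TensorDict whose keys match the declared fields, exposing them as attributes. A minimal, self-contained sketch (DemoLosses and its fields are illustrative, not the PR's code):

    import torch
    from tensordict import TensorDict, tensorclass


    @tensorclass
    class DemoLosses:
        loss_objective: torch.Tensor
        loss_critic: torch.Tensor | None = None


    # Stand-in for the td_out built inside forward().
    td_out = TensorDict(
        {"loss_objective": torch.tensor(1.0), "loss_critic": torch.tensor(0.2)},
        batch_size=[],
    )
    # Wrap the TensorDict in the tensorclass; fields become attributes.
    losses = DemoLosses._from_tensordict(td_out)
    assert torch.equal(losses.loss_objective, td_out["loss_objective"])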
Review thread on this hunk:
    "Doesn't it work if we make the base class a tensorclass?"
    "Yes, it doesn't work."