Fix/wavelet activation #181

Merged: 2 commits, Feb 7, 2024
13 changes: 10 additions & 3 deletions simulai/models/_pytorch_models/_transformer.py
@@ -8,13 +8,14 @@


 class BaseTemplate(NetworkTemplate):
-    def __init__(self):
+    def __init__(self, device: str = "cpu"):
         """Template used for sharing fundamental methods with the
         children transformer-like encoders and decoders.

         """

         super(BaseTemplate, self).__init__()
+        self.device = device

     def _activation_getter(
         self, activation: Union[str, torch.nn.Module]
@@ -33,7 +34,9 @@ def _activation_getter(
         if isinstance(activation, torch.nn.Module):
             return encoder_activation
         elif isinstance(activation, str):
-            return self._get_operation(operation=activation, is_activation=True)
+            act = self._get_operation(operation=activation, is_activation=True)
+            act.setup(device=self.device)
+            return act
         else:
             raise Exception(f"The activation {activation} is not supported.")

@@ -45,6 +48,7 @@ def __init__(
         activation: Union[str, torch.nn.Module] = "relu",
         mlp_layer: torch.nn.Module = None,
         embed_dim: Union[int, Tuple] = None,
+        device: str = "cpu",
     ) -> None:
         """Generic transformer encoder.

@@ -56,7 +60,7 @@

         """

-        super(BasicEncoder, self).__init__()
+        super(BasicEncoder, self).__init__(device=device)

         self.num_heads = num_heads

@@ -107,6 +111,7 @@ def __init__(
         activation: Union[str, torch.nn.Module] = "relu",
         mlp_layer: torch.nn.Module = None,
         embed_dim: Union[int, Tuple] = None,
+        device: str = "cpu",
     ):
         """Generic transformer decoder.

@@ -253,6 +258,7 @@ def __init__(
                     activation=self.encoder_activation,
                     mlp_layer=self.encoder_mlp_layers_list[e],
                     embed_dim=self.embed_dim_encoder,
+                    device=self.device,
                 )
                 for e in range(self.number_of_encoders)
             ]
@@ -266,6 +272,7 @@ def __init__(
                     activation=self.decoder_activation,
                     mlp_layer=self.decoder_mlp_layers_list[d],
                     embed_dim=self.embed_dim_decoder,
+                    device=self.device,
                 )
                 for d in range(self.number_of_decoders)
             ]
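With device now threaded through BaseTemplate, BasicEncoder, and BasicDecoder, a caller picks the device once and it propagates down to every string-specified activation at construction time. A hedged usage sketch follows; the import path, the num_heads and embed_dim values, and the MLP block are assumptions, and only the constructor parameters visible in this diff are confirmed.

    import torch
    # Assumed public import path; the class is defined in
    # simulai/models/_pytorch_models/_transformer.py.
    from simulai.models import BasicEncoder

    # A small feed-forward block for the encoder (shape assumed).
    mlp = torch.nn.Sequential(torch.nn.Linear(64, 64), torch.nn.ReLU())

    # Per this PR, the string activation is resolved by _activation_getter
    # and immediately receives setup(device=...), so any learnable
    # parameters end up on the requested device.
    encoder = BasicEncoder(
        num_heads=4,
        activation="relu",
        mlp_layer=mlp,
        embed_dim=64,
        device="cpu",
    )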
17 changes: 16 additions & 1 deletion simulai/templates/_pytorch_network.py
@@ -169,10 +169,25 @@ def _setup_activations(
         # It instantiates an operation x^l = \sigma(y^l), in which y^l
         # is the output of the previous linear operation.
         if isinstance(activation, str):
+            # Instantiate an example of the activation function in order
+            # to check whether it is trainable.
+            activation_op = self._get_operation(operation=activation)
+
+            if isinstance(activation_op, simulact.TrainableActivation):
+                activations_list = [self._get_operation(operation=activation,
+                                                        is_activation=True)
+                                    for i in range(n_layers - 1)]
+
+                # Trainable activations carry learnable parameters, so each
+                # instance is moved to the proper device.
+                for aa, act in enumerate(activations_list):
+                    act.setup(device=self.device_type)
+                    activations_list[aa] = act
+
+            else:
+                activations_list = [self._get_operation(operation=activation)
+                                    for i in range(n_layers - 1)]

             return (
-                [self._get_operation(operation=activation) for i in range(n_layers - 1)]
+                activations_list
                 + [self._get_operation(operation=self.default_last_activation)],
                 (n_layers - 1) * [activation] + [self.default_last_activation],
             )
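Two points are worth noting in this hunk: trainable activations are detected by instantiating a sample operation and checking its type, and each hidden layer then receives its own fresh instance, so per-layer activation parameters can be learned independently rather than shared. A minimal standalone sketch of that pattern is below; ScaledTanh is a made-up toy activation, not part of simulai, and only the per-layer loop mirrors the code added above.

    import torch

    class ScaledTanh(torch.nn.Module):
        """Toy trainable activation: tanh with a learnable slope."""

        def __init__(self):
            super().__init__()
            self.slope = torch.nn.Parameter(torch.ones(1))

        def setup(self, device: str = "cpu") -> None:
            # Move the learnable slope to the target device.
            self.to(device)

        def forward(self, x: torch.Tensor) -> torch.Tensor:
            return torch.tanh(self.slope * x)

    n_layers = 4
    device_type = "cpu"

    # One fresh instance per hidden layer, each set up on the target device,
    # mirroring the loop added to _setup_activations above.
    activations_list = [ScaledTanh() for _ in range(n_layers - 1)]
    for act in activations_list:
        act.setup(device=device_type)

    # Every layer owns its own learnable slope parameter.
    assert len({id(act.slope) for act in activations_list}) == n_layers - 1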