Commit
* seasonality
* plotting seasonalities fixed
* glocal trend implemented
* glocal trend implemented
* black
* black
* starting glocal trend
* modular code + seasonality with diff global/locals + glocal trend v1 done
* Individual neural nets for future regressors almost done. some tests failing
* Individual neural nets for future regressors done
* shared neural networks for future regressors component
* local seasonality can now be regularised by global seasonality
* typo. using trend config property on seasonalityconfig
* removing variables used for dev
* changing names. Final tests premerging
* Update neural_nets.py
* black
* debug yos
* run yos on main
* fix double compute of AR components
* update debug notebooks

---------

Co-authored-by: Oskar Triebe <[email protected]>
Co-authored-by: leoniewgnr <[email protected]>
1 parent a99059a, commit d4dffe9
Showing 31 changed files with 28,175 additions and 110 deletions.
docs/source/how-to-guides/feature-guides/global_local_modeling_fut_regr.ipynb (3,795 additions, 0 deletions; large diff not rendered by default)
docs/source/how-to-guides/feature-guides/glocal_trend.ipynb (1,728 additions, 0 deletions; large diff not rendered by default)
neuralprophet/components/future_regressors/neural_nets.py (131 additions, 0 deletions)
from collections import OrderedDict

import torch.nn as nn

from neuralprophet.components.future_regressors import FutureRegressors
from neuralprophet.utils_torch import init_parameter, interprete_model


class NeuralNetsFutureRegressors(FutureRegressors):
    def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
            config_trend_none_bool=config_trend_none_bool,
        )
        if self.regressors_dims is not None:
            # Regressor params
            self.regressor_nets = nn.ModuleDict({})
            # TODO: if there are no hidden layers, fall back to a single linear layer as in the legacy implementation
            self.d_hidden_regressors = config.d_hidden
            self.num_hidden_layers_regressors = config.num_hidden_layers
            # One net per regressor; to be adapted to a combined network
            for regressor in self.regressors_dims.keys():
                # Nets for both additive and multiplicative regressors
                regressor_net = nn.ModuleList()
                # This will later be 1 + number of static covariates
                d_inputs = 1
                for i in range(self.num_hidden_layers_regressors):
                    regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
                    d_inputs = self.d_hidden_regressors
                # Final layer has input size d_inputs and output size equal to no. of forecasts * no. of quantiles
                regressor_net.append(nn.Linear(d_inputs, self.n_forecasts * len(self.quantiles), bias=False))
                for lay in regressor_net:
                    nn.init.kaiming_normal_(lay.weight, mode="fan_in")
                self.regressor_nets[regressor] = regressor_net

    def get_reg_weights(self, name):
        """Get attributions of the regressor component network w.r.t. the model input.

        Parameters
        ----------
        name : string
            Regressor name

        Returns
        -------
        torch.Tensor
            Weight corresponding to the given regressor
        """
        reg_attributions = interprete_model(
            self,
            net="regressor_nets",
            forward_func="regressor",
            _num_in_features=self.regressor_nets[name][0].in_features,
            _num_out_features=self.regressor_nets[name][-1].out_features,
            additional_forward_args=name,
        )
        return reg_attributions

    def regressor(self, regressor_input, name):
        """Compute a single regressor component.

        Parameters
        ----------
        regressor_input : torch.Tensor, float
            regressor values at corresponding timestamps, dims: (batch, n_forecasts, 1)
        name : str
            Name of the regressor, for attribution to the corresponding model weights

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        x = regressor_input
        for i in range(self.num_hidden_layers_regressors + 1):
            if i > 0:
                x = nn.functional.relu(x)
            x = self.regressor_nets[name][i](x)

        # Reshape the last dimension to match the quantiles
        x = x.reshape(x.shape[0], self.n_forecasts, len(self.quantiles))
        return x

    def all_regressors(self, regressor_inputs, mode):
        """Compute all regressor components of the given mode.

        Parameters
        ----------
        regressor_inputs : torch.Tensor, float
            regressor values at corresponding timestamps, dims: (batch, n_forecasts, num_regressors)
        mode : str
            either "additive" or "multiplicative"

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        # Select only the regressors whose mode matches the requested mode
        regressors_dims_filtered = OrderedDict((k, v) for k, v in self.regressors_dims.items() if v["mode"] == mode)
        for i, name in enumerate(regressors_dims_filtered.keys()):
            regressor_index = regressors_dims_filtered[name]["regressor_index"]
            regressor_input = regressor_inputs[:, :, regressor_index].unsqueeze(dim=2)
            if i == 0:
                x = self.regressor(regressor_input, name=name)
            if i > 0:
                x = x + self.regressor(regressor_input, name=name)
        return x

    def forward(self, inputs, mode, indeces=None):
        """Compute all future regressor components.

        Parameters
        ----------
        inputs : torch.Tensor, float
            future regressor inputs
        mode : str
            either "additive" or "multiplicative", mode of the regressors

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        if mode == "additive":
            f_r = self.all_regressors(inputs, mode="additive")
        if mode == "multiplicative":
            f_r = self.all_regressors(inputs, mode="multiplicative")
        return f_r
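
To make the design above concrete, here is a minimal standalone sketch of the per-regressor idea: each future regressor gets its own small MLP that maps its value to one output per forecast step and quantile, and the per-regressor components are summed. This is not the NeuralProphet API; the regressor names, layer sizes, and simplified input shapes (one scalar per regressor and sample) are assumptions made purely for illustration.

import torch
import torch.nn as nn

# Standalone sketch (not the NeuralProphet API): one MLP per future regressor,
# outputs summed across regressors. Shapes are simplified for illustration.
n_forecasts, n_quantiles, d_hidden, n_hidden_layers = 3, 2, 16, 2
regressor_names = ["temperature", "price"]  # hypothetical future regressors

nets = nn.ModuleDict()
for name in regressor_names:
    layers, d_in = [], 1  # each regressor enters as one scalar per sample
    for _ in range(n_hidden_layers):
        layers += [nn.Linear(d_in, d_hidden), nn.ReLU()]
        d_in = d_hidden
    # map to one output per forecast step and quantile
    layers.append(nn.Linear(d_in, n_forecasts * n_quantiles, bias=False))
    nets[name] = nn.Sequential(*layers)

batch = 4
inputs = {name: torch.randn(batch, 1) for name in regressor_names}
# sum the per-regressor components, reshaped to (batch, n_forecasts, n_quantiles)
component = sum(
    nets[name](inputs[name]).reshape(batch, n_forecasts, n_quantiles) for name in regressor_names
)
print(component.shape)  # torch.Size([4, 3, 2])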
neuralprophet/components/future_regressors/shared_neural_nets.py (112 additions, 0 deletions)
from collections import Counter, OrderedDict

import torch
import torch.nn as nn

from neuralprophet.components.future_regressors import FutureRegressors
from neuralprophet.utils_torch import init_parameter, interprete_model


class SharedNeuralNetsFutureRegressors(FutureRegressors):
    def __init__(self, config, id_list, quantiles, n_forecasts, device, config_trend_none_bool):
        super().__init__(
            config=config,
            n_forecasts=n_forecasts,
            quantiles=quantiles,
            id_list=id_list,
            device=device,
            config_trend_none_bool=config_trend_none_bool,
        )
        if self.regressors_dims is not None:
            # Regressor params
            self.regressor_nets = nn.ModuleDict({})
            # TODO: if there are no hidden layers, fall back to a single linear layer as in the legacy implementation
            self.d_hidden_regressors = config.d_hidden
            self.num_hidden_layers_regressors = config.num_hidden_layers
            # Combined network: one shared net per mode (additive/multiplicative)
            for net_i, size_i in Counter([x["mode"] for x in self.regressors_dims.values()]).items():
                # Nets for both additive and multiplicative regressors
                regressor_net = nn.ModuleList()
                # This will later be size_i * (1 + number of static covariates)
                d_inputs = size_i
                for i in range(self.num_hidden_layers_regressors):
                    regressor_net.append(nn.Linear(d_inputs, self.d_hidden_regressors, bias=True))
                    d_inputs = self.d_hidden_regressors
                # Final layer has input size d_inputs and output size equal to no. of forecasts * no. of quantiles
                regressor_net.append(nn.Linear(d_inputs, self.n_forecasts * len(self.quantiles), bias=False))
                for lay in regressor_net:
                    nn.init.kaiming_normal_(lay.weight, mode="fan_in")
                self.regressor_nets[net_i] = regressor_net

    def get_reg_weights(self, name):
        """Get attributions of the regressor component network w.r.t. the model input.

        Parameters
        ----------
        name : string
            Regressor name

        Returns
        -------
        torch.Tensor
            Weight corresponding to the given regressor
        """
        mode = self.config_regressors.regressors[name].mode
        reg_attributions = interprete_model(
            self,
            net="regressor_nets",
            forward_func="regressors_net",
            _num_in_features=self.regressor_nets[mode][0].in_features,
            _num_out_features=self.regressor_nets[mode][-1].out_features,
            additional_forward_args=mode,
        )
        regressor_index = self.regressors_dims[name]["regressor_index"]
        return reg_attributions[:, regressor_index].unsqueeze(-1)

    def regressors_net(self, regressor_inputs, mode):
        """Compute the shared regressor component for all regressors of the given mode.

        Parameters
        ----------
        regressor_inputs : torch.Tensor, float
            regressor values at corresponding timestamps, dims: (batch, n_forecasts, num_regressors of the given mode)
        mode : str
            either "additive" or "multiplicative", for attribution to the corresponding shared network

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        x = regressor_inputs
        for i in range(self.num_hidden_layers_regressors + 1):
            if i > 0:
                x = nn.functional.relu(x)
            x = self.regressor_nets[mode][i](x)

        # Reshape the last dimension to match the quantiles
        x = x.reshape(x.shape[0], self.n_forecasts, len(self.quantiles))
        return x

    def forward(self, inputs, mode, indeces=None):
        """Compute all future regressor components.

        Parameters
        ----------
        inputs : torch.Tensor, float
            future regressor inputs
        mode : str
            either "additive" or "multiplicative", mode of the regressors

        Returns
        -------
        torch.Tensor
            Forecast component of dims (batch, n_forecasts, num_quantiles)
        """
        if mode == "additive":
            f_r = self.regressors_net(inputs, mode="additive")
        if mode == "multiplicative":
            f_r = self.regressors_net(inputs, mode="multiplicative")
        return f_r
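
For contrast with the per-regressor variant above, here is a minimal standalone sketch of the shared approach: all future regressors of one mode are fed jointly into a single MLP, so the shared hidden layers can pick up interactions between regressors. Again, this is not the NeuralProphet API; the number of regressors, layer sizes, and simplified input shape are assumptions for illustration only.

import torch
import torch.nn as nn

# Standalone sketch (not the NeuralProphet API): one shared MLP per mode,
# taking all regressors of that mode as a joint input vector.
n_forecasts, n_quantiles, d_hidden, n_hidden_layers = 3, 2, 16, 2
n_additive_regressors = 4  # hypothetical number of additive regressors

layers, d_in = [], n_additive_regressors  # joint input: one scalar per regressor
for _ in range(n_hidden_layers):
    layers += [nn.Linear(d_in, d_hidden), nn.ReLU()]
    d_in = d_hidden
# map to one output per forecast step and quantile
layers.append(nn.Linear(d_in, n_forecasts * n_quantiles, bias=False))
shared_net = nn.Sequential(*layers)

batch = 4
additive_inputs = torch.randn(batch, n_additive_regressors)
component = shared_net(additive_inputs).reshape(batch, n_forecasts, n_quantiles)
print(component.shape)  # torch.Size([4, 3, 2])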