diff --git a/examples/DeepONet/miscellaneous/notebooks/timeint_DeepONet_simple_manufactured_pinn.py b/examples/DeepONet/miscellaneous/notebooks/timeint_DeepONet_simple_manufactured_pinn.py index 1b3729a1..12cb6ec1 100644 --- a/examples/DeepONet/miscellaneous/notebooks/timeint_DeepONet_simple_manufactured_pinn.py +++ b/examples/DeepONet/miscellaneous/notebooks/timeint_DeepONet_simple_manufactured_pinn.py @@ -23,16 +23,17 @@ import os + import matplotlib.pyplot as plt import numpy as np +from simulai.file import SPFile +from simulai.optimization import Optimizer +from simulai.residuals import SymbolicOperator # In[3]: -from simulai.file import SPFile -from simulai.optimization import Optimizer -from simulai.residuals import SymbolicOperator # #### Basic configuration @@ -281,7 +282,6 @@ def model(): from scipy.integrate import odeint - # #### Pendulum numerical solver # In[ ]: diff --git a/examples/DeepONet/miscellaneous/scripts/polynomial_integration.py b/examples/DeepONet/miscellaneous/scripts/polynomial_integration.py index e0d6153a..7da6025d 100644 --- a/examples/DeepONet/miscellaneous/scripts/polynomial_integration.py +++ b/examples/DeepONet/miscellaneous/scripts/polynomial_integration.py @@ -1,9 +1,9 @@ import numpy as np import torch +from simulai.optimization import Optimizer from simulai.residuals import SymbolicOperator from simulai.tokens import Dot, Gp -from simulai.optimization import Optimizer def model(): diff --git a/examples/ESN-RC/scripts/independent_esn_rc_nonlinear_forcing.py b/examples/ESN-RC/scripts/independent_esn_rc_nonlinear_forcing.py index 273d00ac..5ffc639a 100644 --- a/examples/ESN-RC/scripts/independent_esn_rc_nonlinear_forcing.py +++ b/examples/ESN-RC/scripts/independent_esn_rc_nonlinear_forcing.py @@ -19,10 +19,8 @@ import matplotlib.pyplot as plt import numpy as np -from examples.utils.oscillator_solver import ( - oscillator_solver, - oscillator_solver_forcing, -) +from examples.utils.oscillator_solver import (oscillator_solver, + oscillator_solver_forcing) from simulai.models import ModelPool from simulai.regression import EchoStateNetwork from simulai.utilities import make_temp_directory diff --git a/examples/MapValid/map_valid_reshaper.py b/examples/MapValid/map_valid_reshaper.py index 76625b0d..0da91307 100644 --- a/examples/MapValid/map_valid_reshaper.py +++ b/examples/MapValid/map_valid_reshaper.py @@ -23,11 +23,8 @@ from simulai.io import BatchCopy, MapValid from simulai.metrics import L2Norm, MemorySizeEval from simulai.models import ModelPool -from simulai.normalization import ( - BatchNormalization, - UnitaryNormalization, - UnitarySymmetricalNormalization, -) +from simulai.normalization import (BatchNormalization, UnitaryNormalization, + UnitarySymmetricalNormalization) from simulai.rom import IPOD from simulai.simulation import Pipeline diff --git a/examples/OpInf/scripts/lorenz_96_chaotic.py b/examples/OpInf/scripts/lorenz_96_chaotic.py index a4322655..17930bac 100644 --- a/examples/OpInf/scripts/lorenz_96_chaotic.py +++ b/examples/OpInf/scripts/lorenz_96_chaotic.py @@ -13,7 +13,6 @@ # limitations under the License. 
import os - #!/usr/bin/env python import warnings diff --git a/examples/OpInf/scripts/lorenz_96_chaotic_multiple.py b/examples/OpInf/scripts/lorenz_96_chaotic_multiple.py index 7203e109..0fcb21c9 100644 --- a/examples/OpInf/scripts/lorenz_96_chaotic_multiple.py +++ b/examples/OpInf/scripts/lorenz_96_chaotic_multiple.py @@ -14,7 +14,6 @@ import os import time - #!/usr/bin/env python import warnings diff --git a/examples/OpInf/scripts/lorenz_96_stability.py b/examples/OpInf/scripts/lorenz_96_stability.py index 1cd73839..8af47c57 100644 --- a/examples/OpInf/scripts/lorenz_96_stability.py +++ b/examples/OpInf/scripts/lorenz_96_stability.py @@ -14,7 +14,6 @@ import os import pickle - #!/usr/bin/env python import warnings from argparse import ArgumentParser diff --git a/examples/OpInf/scripts/opinf_nonlinear_network.py b/examples/OpInf/scripts/opinf_nonlinear_network.py index 36b6e453..202a6c80 100644 --- a/examples/OpInf/scripts/opinf_nonlinear_network.py +++ b/examples/OpInf/scripts/opinf_nonlinear_network.py @@ -25,12 +25,8 @@ from examples.utils.lorenz_solver import lorenz_solver from simulai.math.integration import LSODA, ClassWrapper from simulai.optimization import Optimizer, ScipyInterface -from simulai.regression import ( - AutoEncoderKoopman, - DenseNetwork, - KoopmanNetwork, - OpInfNetwork, -) +from simulai.regression import (AutoEncoderKoopman, DenseNetwork, + KoopmanNetwork, OpInfNetwork) class LorenzJacobian: diff --git a/examples/OpInf/scripts/svd_lorenz_96_chaotic.py b/examples/OpInf/scripts/svd_lorenz_96_chaotic.py index fb8d920d..4c01453f 100644 --- a/examples/OpInf/scripts/svd_lorenz_96_chaotic.py +++ b/examples/OpInf/scripts/svd_lorenz_96_chaotic.py @@ -13,7 +13,6 @@ # limitations under the License. import os - #!/usr/bin/env python import warnings diff --git a/examples/PINN/scripts/Bioreactor/001-Bioreactor_ODEs.py b/examples/PINN/scripts/Bioreactor/001-Bioreactor_ODEs.py index 75175f57..b5862911 100644 --- a/examples/PINN/scripts/Bioreactor/001-Bioreactor_ODEs.py +++ b/examples/PINN/scripts/Bioreactor/001-Bioreactor_ODEs.py @@ -7,9 +7,9 @@ """ """ Python Libraries""" -import pandas as pd -import numpy as np import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from scipy.integrate import solve_ivp """ Kinetics constants""" diff --git a/examples/PINN/scripts/Bioreactor/002-Bioreactor_PINN.py b/examples/PINN/scripts/Bioreactor/002-Bioreactor_PINN.py index a8cc119a..e768d22b 100644 --- a/examples/PINN/scripts/Bioreactor/002-Bioreactor_PINN.py +++ b/examples/PINN/scripts/Bioreactor/002-Bioreactor_PINN.py @@ -23,24 +23,25 @@ """ """ Import Python Libraries """ +import math import os +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import math -import random import torch torch.set_default_dtype(torch.float64) torch.set_default_tensor_type(torch.DoubleTensor) from simulai import ARRAY_DTYPE +from simulai.file import SPFile from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """#########################################################################""" """REPRODUCIBILITY """ @@ -301,8 +302,8 @@ def adaptative_scale_factors(alpha: float = None): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from 
simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { @@ -370,10 +371,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device depth = 3 width = 64 @@ -382,8 +384,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { diff --git a/examples/PINN/scripts/Bioreactor_Multifidelity.py b/examples/PINN/scripts/Bioreactor_Multifidelity.py index 0917cd7c..f9e6a732 100644 --- a/examples/PINN/scripts/Bioreactor_Multifidelity.py +++ b/examples/PINN/scripts/Bioreactor_Multifidelity.py @@ -23,20 +23,21 @@ """ """ Import Python Libraries """ +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import random import torch torch.set_default_dtype(torch.float64) +from simulai.file import SPFile from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """ Variables """ # Bioreactor @@ -120,8 +121,8 @@ def Epoch_Decay(iteration_number): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork scale_factors = np.array([1, 1, 1, 1]) @@ -183,10 +184,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device t_max = 72.0 n_intervals = 72 @@ -198,8 +200,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork scale_factors = np.array([1, 1, 1, 1]) diff --git a/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep.py b/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep.py index 3699c276..af8ea52b 100644 --- a/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep.py +++ b/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep.py @@ -23,20 +23,21 @@ """ """ Import Python Libraries """ +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import random import torch torch.set_default_dtype(torch.float64) +from simulai.file import SPFile from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """ Variables """ # Bioreactor @@ -147,8 +148,8 @@ def Delta_t(i, last_delta_t): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from 
simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork scale_factors = np.array([1, 1, 1, 1]) @@ -210,10 +211,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device depth = 3 width = 50 @@ -222,8 +224,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork scale_factors = np.array([1, 1, 1, 1]) diff --git a/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod.py b/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod.py index a196ab11..09022383 100644 --- a/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod.py +++ b/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod.py @@ -23,21 +23,22 @@ """ """ Import Python Libraries """ +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import random import torch torch.set_default_dtype(torch.float64) from simulai import ARRAY_DTYPE +from simulai.file import SPFile from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """ Variables """ # Bioreactor @@ -148,8 +149,8 @@ def Delta_t(i, last_delta_t): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { @@ -217,10 +218,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device depth = 3 width = 50 @@ -229,8 +231,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { diff --git a/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod_weighting.py b/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod_weighting.py index 00176176..5c37087f 100644 --- a/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod_weighting.py +++ b/examples/PINN/scripts/Bioreactor_Multifidelity_AutoTimeStep_mod_weighting.py @@ -23,27 +23,24 @@ """ """ Import Python Libraries """ +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import random import torch torch.set_default_dtype(torch.float64) from simulai import ARRAY_DTYPE -from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface -from simulai.optimization import ( - GeometricMean, - ShiftToMax, - AnnealingWeights, - InverseDirichletWeights, -) +from simulai.file import 
SPFile +from simulai.optimization import (AnnealingWeights, GeometricMean, + InverseDirichletWeights, Optimizer, + PIRMSELoss, ScipyInterface, ShiftToMax) from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """ Variables """ # Bioreactor @@ -154,8 +151,8 @@ def Delta_t(i, last_delta_t): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { @@ -227,10 +224,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device depth = 3 width = 50 @@ -239,8 +237,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { diff --git a/examples/PINN/scripts/Bioreactor_Multifidelity_adaptive.py b/examples/PINN/scripts/Bioreactor_Multifidelity_adaptive.py index cef8fc8b..a4e5a142 100644 --- a/examples/PINN/scripts/Bioreactor_Multifidelity_adaptive.py +++ b/examples/PINN/scripts/Bioreactor_Multifidelity_adaptive.py @@ -23,20 +23,21 @@ """ """ Import Python Libraries """ +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import random import torch torch.set_default_dtype(torch.float64) +from simulai.file import SPFile from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """ Variables """ # Bioreactor @@ -132,8 +133,8 @@ def Delta_t(i): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork scale_factors = np.array([1, 1, 1, 1]) @@ -195,10 +196,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device depth = 3 width = 50 @@ -207,8 +209,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork scale_factors = np.array([1, 1, 1, 1]) diff --git a/examples/PINN/scripts/Bioreactor_ODEs.py b/examples/PINN/scripts/Bioreactor_ODEs.py index 0ff17ffb..a242aafa 100644 --- a/examples/PINN/scripts/Bioreactor_ODEs.py +++ b/examples/PINN/scripts/Bioreactor_ODEs.py @@ -7,9 +7,9 @@ """ """ Python Libraries""" -import pandas as pd -import numpy as np import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from scipy.integrate import solve_ivp """ Kinetics constants""" diff --git 
a/examples/PINN/scripts/Bioreactor_lbfgs.py b/examples/PINN/scripts/Bioreactor_lbfgs.py index 7aaaaf76..1089876e 100644 --- a/examples/PINN/scripts/Bioreactor_lbfgs.py +++ b/examples/PINN/scripts/Bioreactor_lbfgs.py @@ -27,6 +27,7 @@ import numpy as np import pandas as pd import torch + from simulai.optimization import Optimizer from simulai.residuals import SymbolicOperator @@ -110,8 +111,8 @@ def Epoch_Decay(iteration_number): def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork scale_factors = np.array([1, 1, 1, 1]) @@ -220,8 +221,7 @@ def forward(self, input_data=None): device="gpu", ) - from simulai.optimization import ScipyInterface - from simulai.optimization import PIRMSELoss + from simulai.optimization import PIRMSELoss, ScipyInterface loss_instance = PIRMSELoss(operator=net) diff --git a/examples/PINN/scripts/Fire/001-Fire_ODE.py b/examples/PINN/scripts/Fire/001-Fire_ODE.py index 09cedb96..06fb1567 100644 --- a/examples/PINN/scripts/Fire/001-Fire_ODE.py +++ b/examples/PINN/scripts/Fire/001-Fire_ODE.py @@ -38,9 +38,9 @@ """ Import Python Libraries """ -import pandas as pd -import numpy as np import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from scipy.integrate import solve_ivp """ Global Variables """ diff --git a/examples/PINN/scripts/Fire/002-Fire_PINN.py b/examples/PINN/scripts/Fire/002-Fire_PINN.py index c6b5bd72..5a743535 100644 --- a/examples/PINN/scripts/Fire/002-Fire_PINN.py +++ b/examples/PINN/scripts/Fire/002-Fire_PINN.py @@ -36,24 +36,25 @@ """ """ Import Python Libraries """ +import math import os +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import math -import random import torch torch.set_default_dtype(torch.float64) torch.set_default_tensor_type(torch.DoubleTensor) from simulai import ARRAY_DTYPE +from simulai.file import SPFile from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """#########################################################################""" """REPRODUCIBILITY """ @@ -293,8 +294,8 @@ def adaptative_scale_factors(alpha: float = None): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { @@ -362,10 +363,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device depth = 3 width = 64 @@ -374,8 +376,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { diff --git a/examples/PINN/scripts/Rober/001-Rober_ODE.py b/examples/PINN/scripts/Rober/001-Rober_ODE.py index b6b94e28..575a902c 100644 --- a/examples/PINN/scripts/Rober/001-Rober_ODE.py +++ 
b/examples/PINN/scripts/Rober/001-Rober_ODE.py @@ -35,9 +35,9 @@ """ Import Python Libraries """ -import pandas as pd -import numpy as np import matplotlib.pyplot as plt +import numpy as np +import pandas as pd from scipy.integrate import solve_ivp """ Global Variables """ diff --git a/examples/PINN/scripts/Rober/002-Rober_PINN.py b/examples/PINN/scripts/Rober/002-Rober_PINN.py index 392b4482..72a50c6d 100644 --- a/examples/PINN/scripts/Rober/002-Rober_PINN.py +++ b/examples/PINN/scripts/Rober/002-Rober_PINN.py @@ -34,24 +34,25 @@ """ """ Import Python Libraries """ +import math import os +import random from argparse import ArgumentParser from typing import List + import matplotlib.pyplot as plt import numpy as np import pandas as pd -import math -import random import torch torch.set_default_dtype(torch.float64) torch.set_default_tensor_type(torch.DoubleTensor) from simulai import ARRAY_DTYPE +from simulai.file import SPFile from simulai.optimization import Optimizer, PIRMSELoss, ScipyInterface from simulai.residuals import SymbolicOperator from simulai.templates import NetworkTemplate, guarantee_device -from simulai.file import SPFile """#########################################################################""" """REPRODUCIBILITY """ @@ -305,8 +306,8 @@ def adaptative_scale_factors(alpha: float = None): activations_funct = "tanh" def model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { @@ -374,10 +375,11 @@ def model_(): torch.set_default_dtype(torch.float64) - from simulai.templates import NetworkTemplate, guarantee_device import numpy as np + from simulai.models import ImprovedDenseNetwork from simulai.regression import SLFNN, ConvexDenseNetwork + from simulai.templates import NetworkTemplate, guarantee_device depth = 3 width = 64 @@ -386,8 +388,8 @@ def model_(): # Model used for initialization def sub_model(): - from simulai.regression import SLFNN, ConvexDenseNetwork from simulai.models import ImprovedDenseNetwork + from simulai.regression import SLFNN, ConvexDenseNetwork # Configuration for the fully-connected network config = { diff --git a/examples/PINN/scripts/allen_cahn_system_transformer_pinn.py b/examples/PINN/scripts/allen_cahn_system_transformer_pinn.py index 76df3601..130b9962 100644 --- a/examples/PINN/scripts/allen_cahn_system_transformer_pinn.py +++ b/examples/PINN/scripts/allen_cahn_system_transformer_pinn.py @@ -16,11 +16,11 @@ import numpy as np from simulai.file import SPFile +from simulai.io import Tokenizer +from simulai.models import Transformer from simulai.optimization import Optimizer from simulai.regression import DenseNetwork, ModalRBFNetwork -from simulai.models import Transformer from simulai.residuals import SymbolicOperator -from simulai.io import Tokenizer # Our PDE # Allen-cahn equation @@ -34,8 +34,8 @@ output_labels = ["u"] # Some fixed values -X_DIM = 50 #256 -T_DIM = 20 #100 +X_DIM = 50 # 256 +T_DIM = 20 # 100 L = 1 x_0 = -1 @@ -45,9 +45,9 @@ n_epochs = 5_000 DEVICE = "gpu" num_step = 10 -#""" -step = T/T_DIM -#""" +# """ +step = T / T_DIM +# """ # Generating the training grid @@ -95,24 +95,33 @@ ) # Visualizing the training mesh -#plt.scatter(*np.split(data, 2, axis=1)) -#plt.scatter(*np.split(data_boundary_x0, 2, axis=1)) -#plt.scatter(*np.split(data_boundary_xL, 2, axis=1)) -#plt.scatter(*np.split(data_boundary_t0, 2, axis=1)) +# plt.scatter(*np.split(data, 2, 
axis=1)) +# plt.scatter(*np.split(data_boundary_x0, 2, axis=1)) +# plt.scatter(*np.split(data_boundary_xL, 2, axis=1)) +# plt.scatter(*np.split(data_boundary_t0, 2, axis=1)) -#plt.show() -#plt.close() +# plt.show() +# plt.close() n_epochs = 50_000 # Maximum number of iterations for ADAM lr = 1e-3 # Initial learning rate for the ADAM algorithm # Preparing datasets -tokenizer = Tokenizer(kind="spatiotemporal_indexer") -input_data = tokenizer.generate_input_tokens(input_data=data, num_step=num_step, step=step) -data_boundary_x0 = tokenizer.generate_input_tokens(input_data=data_boundary_x0, num_step=num_step, step=step) -data_boundary_xL = tokenizer.generate_input_tokens(input_data=data_boundary_xL, num_step=num_step, step=step) -data_boundary_t0 = tokenizer.generate_input_tokens(input_data=data_boundary_t0, num_step=num_step, step=step) -u_init = np.repeat(np.expand_dims(u_init, axis=1), num_step, axis=1) +tokenizer = Tokenizer(kind="spatiotemporal_indexer") +input_data = tokenizer.generate_input_tokens( + input_data=data, num_step=num_step, step=step +) +data_boundary_x0 = tokenizer.generate_input_tokens( + input_data=data_boundary_x0, num_step=num_step, step=step +) +data_boundary_xL = tokenizer.generate_input_tokens( + input_data=data_boundary_xL, num_step=num_step, step=step +) +data_boundary_t0 = tokenizer.generate_input_tokens( + input_data=data_boundary_t0, num_step=num_step, step=step +) +u_init = np.repeat(np.expand_dims(u_init, axis=1), num_step, axis=1) + def model(): from simulai.regression import DenseNetwork @@ -137,8 +146,8 @@ def model(): return net -def model_transformer(): +def model_transformer(): num_heads = 2 embed_dim = 2 embed_dim_out = 1 @@ -157,7 +166,7 @@ def model_transformer(): "input_size": embed_dim, "output_size": embed_dim, "name": "mlp_layer", - "devices":"gpu", + "devices": "gpu", } decoder_mlp_config = { @@ -166,10 +175,9 @@ def model_transformer(): "input_size": embed_dim, "output_size": embed_dim, "name": "mlp_layer", - "devices":"gpu", + "devices": "gpu", } - # Instantiating and training the surrogate model transformer = Transformer( num_heads_encoder=num_heads, @@ -270,5 +278,3 @@ def model_transformer(): fig.colorbar(gf) plt.savefig("allen_cahn.png") - - diff --git a/examples/PINN/scripts/pendulum_system_pytorch_pinn.py b/examples/PINN/scripts/pendulum_system_pytorch_pinn.py index 24da1634..5dcc19a1 100644 --- a/examples/PINN/scripts/pendulum_system_pytorch_pinn.py +++ b/examples/PINN/scripts/pendulum_system_pytorch_pinn.py @@ -26,7 +26,8 @@ from simulai.metrics import L2Norm from simulai.models import DeepONet, ImprovedDeepONet from simulai.optimization import Optimizer -from simulai.regression import SLFNN, ConvexDenseNetwork, DenseNetwork, ResDenseNetwork +from simulai.regression import (SLFNN, ConvexDenseNetwork, DenseNetwork, + ResDenseNetwork) from simulai.residuals import SymbolicOperator diff --git a/examples/Sampling/scripts/hamiltonian_sampling_MNIST.py b/examples/Sampling/scripts/hamiltonian_sampling_MNIST.py index 955b8762..ac3f5225 100644 --- a/examples/Sampling/scripts/hamiltonian_sampling_MNIST.py +++ b/examples/Sampling/scripts/hamiltonian_sampling_MNIST.py @@ -6,7 +6,8 @@ from simulai.file import SPFile from simulai.metrics import L2Norm from simulai.optimization import Optimizer -from simulai.sampling import HMC, G_metric, HamiltonianEquations, LeapFrogIntegrator +from simulai.sampling import (HMC, G_metric, HamiltonianEquations, + LeapFrogIntegrator) def model(): diff --git a/examples/Visualization/visual_nets.ipynb 
b/examples/Visualization/visual_nets.ipynb index 54dc2f03..1feb4fbf 100644 --- a/examples/Visualization/visual_nets.ipynb +++ b/examples/Visualization/visual_nets.ipynb @@ -780,7 +780,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.11.6" + "version": "3.9.1" } }, "nbformat": 4, diff --git a/simulai/__init__.py b/simulai/__init__.py index 76440a7d..0b9c7f6a 100644 --- a/simulai/__init__.py +++ b/simulai/__init__.py @@ -19,6 +19,7 @@ # Author: Joao Lucas S. Almeida import os + import numpy as np import torch diff --git a/simulai/activations.py b/simulai/activations.py index ad2e6f6b..0a482561 100644 --- a/simulai/activations.py +++ b/simulai/activations.py @@ -14,13 +14,15 @@ import torch -class TrainableActivation(torch.nn.Module): +class TrainableActivation(torch.nn.Module): def __init__(self): super(TrainableActivation, self).__init__() - def setup(self, device:str=None): + + def setup(self, device: str = None): pass + class Siren(torch.nn.Module): """Sinusoidal Representation Networks (SIREN)""" @@ -46,7 +48,7 @@ def share_to_host(self) -> dict: Returns: dict: A dictionary containing the parameters 'omega_0' and 'c'. - + """ return {"omega_0": self.omega_0, "c": self.c} @@ -58,14 +60,14 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: Returns: torch.Tensor: The output of the SIREN model. - + """ return torch.sin(self.omega_0 * input) class sin(torch.nn.Module): """Sine activation function. - + This module applies the sine function element-wise to the input. """ @@ -73,9 +75,7 @@ class sin(torch.nn.Module): name = "sin" def __init__(self) -> None: - """Initialize the sine activation function. - - """ + """Initialize the sine activation function.""" super(sin, self).__init__() @@ -87,7 +87,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: Returns: torch.Tensor: The output of the sine activation function. - + """ return torch.sin(input) @@ -97,16 +97,14 @@ class Wavelet(TrainableActivation): name = "wavelet" - def __init__(self, device:str="cpu") -> None: - + def __init__(self, device: str = "cpu") -> None: super(Wavelet, self).__init__() self.device = device self.w1 = torch.nn.Parameter(torch.ones(1).to(self.device), requires_grad=True) self.w2 = torch.nn.Parameter(torch.ones(1).to(self.device), requires_grad=True) - def setup(self, device:str=None) -> None: - + def setup(self, device: str = None) -> None: self.w1 = torch.nn.Parameter(torch.ones(1).to(self.device), requires_grad=True) self.w2 = torch.nn.Parameter(torch.ones(1).to(self.device), requires_grad=True) @@ -118,7 +116,7 @@ def forward(self, input: torch.Tensor) -> torch.Tensor: Returns: torch.Tensor: The output of the sine activation function. - + """ return self.w1 * torch.sin(input) + self.w2 * torch.cos(input) diff --git a/simulai/batching.py b/simulai/batching.py index 191ee8e1..f5ed8732 100644 --- a/simulai/batching.py +++ b/simulai/batching.py @@ -40,7 +40,7 @@ def __init__( input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. 
(Default value = None) - + """ # This import avoids circular importing @@ -85,8 +85,8 @@ def minmax( data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: - A tuple of minimum and maximum value of the target variables.: - + A tuple of minimum and maximum value of the target variables.: + """ min_list = [] max_list = [] @@ -107,8 +107,8 @@ def input_shape(self) -> list: if 'channels_first' is True. Returns: - A list of integers representing the shape of the input variables.: - + A list of integers representing the shape of the input variables.: + """ if self.channels_first: shape_ = self.dataset[self.input_variables[0]].shape @@ -127,8 +127,8 @@ def _normalization_bypass(self, data: np.ndarray = None) -> np.ndarray: data (np.ndarray, optional): The data to be bypassed. (Default value = None) Returns: - Same data: - + Same data: + """ return data @@ -139,8 +139,8 @@ def _target_normalization(self, data: np.ndarray = None) -> np.ndarray: data (np.ndarray, optional): The target data to be normalized. (Default value = None) Returns: - Normalized target data.: - + Normalized target data.: + """ return self.target_normalizer(data=data) @@ -151,8 +151,8 @@ def _input_normalization(self, data: np.ndarray = None) -> np.ndarray: data (np.ndarray, optional): The input data to be normalized. (Default value = None) Returns: - Normalized input data.: - + Normalized input data.: + """ return self.input_normalizer(data=data) @@ -161,7 +161,7 @@ def _transpose_first_channel(self, variables_list: list = None) -> torch.Tensor: Args: variables_list (list, optional): (Default value = None) - + """ batch = np.stack(variables_list, axis=-1) @@ -179,8 +179,8 @@ def _simple_stack(self, variables_list: list = None) -> torch.Tensor: variables_list (list, optional): The list of variables to be stacked. (Default value = None) Returns: - A torch tensor of stacked variables.: - + A torch tensor of stacked variables.: + """ batch = np.stack(variables_list, dim=-1) @@ -193,8 +193,8 @@ def input_data(self, indices: np.ndarray = None) -> torch.Tensor: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: - A torch tensor of input data: - + A torch tensor of input data: + """ indices = np.sort(indices) @@ -211,8 +211,8 @@ def target_data(self, indices: np.ndarray = None) -> torch.Tensor: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: - A torch tensor of target data: - + A torch tensor of target data: + """ indices = np.sort(indices) @@ -238,8 +238,8 @@ def batchdomain_constructor( batch_indices (list, optional): A list of indices to be divided into batches. (Default value = None) Returns: - A list of lists containing the indices of the input data in the form of batches.: - + A list of lists containing the indices of the input data in the form of batches.: + """ if data_interval is not None: @@ -300,8 +300,8 @@ def indices_batchdomain_constructor( batch_size (int, optional): The desired size of the batches. 
(Default value = None) Returns: - A list of lists containing the indices of the input data in the form of batches.: - + A list of lists containing the indices of the input data in the form of batches.: + """ interval_size = indices.shape[0] diff --git a/simulai/file.py b/simulai/file.py index 3db1f5ea..4366f757 100644 --- a/simulai/file.py +++ b/simulai/file.py @@ -14,8 +14,8 @@ import importlib import inspect -import pickle import os +import pickle import sys from typing import Union @@ -29,12 +29,12 @@ def load_pkl(path: str = None) -> Union[object, None]: path (str, optional): (Default value = None) Returns: - object or None: + object or None: Raises: Exception: if the provided path is not a file or cannot be opened - + """ import pickle @@ -64,14 +64,14 @@ def load_pkl(path: str = None) -> Union[object, None]: class SPFile: def __init__(self, compact: bool = False) -> None: """Class for handling persistence of Pytorch Module-like objects. - + SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False - + """ self.compact = compact @@ -83,7 +83,7 @@ def _leading_size(self, first_line: str = None) -> int: Returns: int: number of leading white spaces. - + """ leading_whitespaces = len(first_line) - len(first_line.lstrip()) return leading_whitespaces @@ -96,7 +96,7 @@ def _process_code(self, code: str = None) -> str: Returns: str: The code string with leading white spaces removed. - + """ code_lines = code.split("\n") first_line = code_lines[0] @@ -157,7 +157,7 @@ def read( Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. - + """ name = os.path.basename(model_path) save_dir = model_path diff --git a/simulai/io.py b/simulai/io.py index c46f73c0..f82ceb68 100644 --- a/simulai/io.py +++ b/simulai/io.py @@ -24,7 +24,8 @@ from simulai import ARRAY_DTYPE from simulai.abstract import DataPreparer, Dataset -from simulai.batching import batchdomain_constructor, indices_batchdomain_constructor +from simulai.batching import (batchdomain_constructor, + indices_batchdomain_constructor) from simulai.metrics import MemorySizeEval """ @@ -1789,6 +1790,7 @@ def size(self): def __call__(self): return (1 + self.stddev * torch.randn(*self.data_shape)) * self.input_data + class Tokenizer: """Wrapper for multiple tokenization approaches""" @@ -1804,7 +1806,7 @@ def __init__(self, kind: str = "time_indexer"): if self.kind == "time_indexer": self.input_tokenizer = self._make_time_input_sequence self.target_tokenizer = self._make_time_target_sequence - + elif self.kind == "spatiotemporal_indexer": self.input_tokenizer = self._make_spatiotemporal_sequence @@ -1815,20 +1817,26 @@ def __init__(self, kind: str = "time_indexer"): else: raise Exception(f"The tokenization option {self.kind} is not available.") - def generate_input_tokens(self, input_data: Union[np.ndarray, torch.Tensor], **kwargs) -> torch.Tensor: - + def generate_input_tokens( + self, input_data: Union[np.ndarray, torch.Tensor], **kwargs + ) -> torch.Tensor: """Generating the input sequence of tokens.""" return self.input_tokenizer(input_data, **kwargs) - - def generate_target_tokens(self, target_data: Union[np.ndarray, torch.Tensor], **kwargs) -> torch.Tensor: + def generate_target_tokens( + self, target_data: Union[np.ndarray, torch.Tensor], **kwargs + ) -> torch.Tensor: """Generating the target sequence of tokens.""" return 
self.target_tokenizer(target_data, **kwargs) - def _make_time_input_sequence(self, - src: Union[np.ndarray, torch.Tensor], num_step:int=None, step:float=None, remove_final=True, + def _make_time_input_sequence( + self, + src: Union[np.ndarray, torch.Tensor], + num_step: int = None, + step: float = None, + remove_final=True, ) -> Union[np.ndarray, torch.Tensor]: """Simple tokenization based on repeating samples and time-indexing them. @@ -1851,14 +1859,18 @@ def _make_time_input_sequence(self, for i in range(num_step): src_final[:, i, -1] += step * i - + if remove_final: - return src_final[:-num_step + 1] + return src_final[: -num_step + 1] else: return src_final - def _make_spatiotemporal_sequence(self, - src: Union[np.ndarray, torch.Tensor], num_step:int=None, step:float=None, **kwargs, + def _make_spatiotemporal_sequence( + self, + src: Union[np.ndarray, torch.Tensor], + num_step: int = None, + step: float = None, + **kwargs, ) -> Union[np.ndarray, torch.Tensor]: """Simple tokenization based on repeating samples and time-indexing them. @@ -1872,13 +1884,13 @@ def _make_spatiotemporal_sequence(self, dim = num_step src = np.repeat(np.expand_dims(src, axis=1), dim, axis=1) # (N, L, 2) for i in range(num_step): - src[:,i,-1] += step*i - - return src + src[:, i, -1] += step * i + return src - def _make_time_target_sequence(self, - src: Union[np.ndarray, torch.Tensor], num_step:int=None) -> Union[np.ndarray, torch.Tensor]: + def _make_time_target_sequence( + self, src: Union[np.ndarray, torch.Tensor], num_step: int = None + ) -> Union[np.ndarray, torch.Tensor]: """Simple tokenization based on repeating samples and time-indexing them. Args: @@ -1887,13 +1899,19 @@ def _make_time_target_sequence(self, Returns: Union[np.ndarray, torch.Tensor]: The tokenized target dataset. """ - moving_window = MovingWindow(history_size=1, skip_size=1, horizon_size=num_step - 1) + moving_window = MovingWindow( + history_size=1, skip_size=1, horizon_size=num_step - 1 + ) input_data, output_data = moving_window(input_data=src, output_data=src) return np.concatenate([input_data, output_data], axis=1) - def _make_time_deeponet_input_sequence(self, - src: Union[np.ndarray, torch.Tensor], num_step:int=None, step:float=None, remove_final=True, + def _make_time_deeponet_input_sequence( + self, + src: Union[np.ndarray, torch.Tensor], + num_step: int = None, + step: float = None, + remove_final=True, ) -> Union[np.ndarray, torch.Tensor]: """Simple tokenization based on repeating samples and time-indexing them adapted for DeepONet architectures. @@ -1904,16 +1922,19 @@ def _make_time_deeponet_input_sequence(self, Returns: Union[np.ndarray, torch.Tensor]: The tokenized input dataset. """ - - output = self._make_time_input_sequence(src, num_step, step, remove_final=remove_final) + + output = self._make_time_input_sequence( + src, num_step, step, remove_final=remove_final + ) output = np.concatenate(output, axis=0) # Outputs for branch and trunk networks return (output[:, :-1], output[:, -1:]) - def _make_time_deeponet_target_sequence(self, - src: Union[np.ndarray, torch.Tensor], num_step:int=None) -> Union[np.ndarray, torch.Tensor]: + def _make_time_deeponet_target_sequence( + self, src: Union[np.ndarray, torch.Tensor], num_step: int = None + ) -> Union[np.ndarray, torch.Tensor]: """Simple tokenization based on repeating samples and time-indexing them adapted for DeepONet architectures. 
Args: @@ -1928,4 +1949,3 @@ def _make_time_deeponet_target_sequence(self, output = np.concatenate(output, axis=0) return output - diff --git a/simulai/models/__init__.py b/simulai/models/__init__.py index ca030a70..e1a5489a 100644 --- a/simulai/models/__init__.py +++ b/simulai/models/__init__.py @@ -15,26 +15,12 @@ from simulai import engine if engine == "pytorch": - from ._pytorch_models import ( - AutoencoderCNN, - AutoencoderKoopman, - AutoencoderMLP, - AutoencoderVariational, - MultiScaleAutoencoder, - DeepONet, - FlexibleDeepONet, - ImprovedDeepONet, - NIF, - ImprovedDenseNetwork, - Transformer, - UNet, - MetaModel, - ModelMaker, - MultiNetwork, - MoEPool, - SplitPool, - ResDeepONet, - ) + from ._pytorch_models import (NIF, AutoencoderCNN, AutoencoderKoopman, + AutoencoderMLP, AutoencoderVariational, + DeepONet, FlexibleDeepONet, ImprovedDeepONet, + ImprovedDenseNetwork, MetaModel, ModelMaker, + MoEPool, MultiNetwork, MultiScaleAutoencoder, + ResDeepONet, SplitPool, Transformer, UNet) elif engine == "numpy": pass else: diff --git a/simulai/models/_pytorch_models/__init__.py b/simulai/models/_pytorch_models/__init__.py index 7bc6d19d..4d8b96f4 100644 --- a/simulai/models/_pytorch_models/__init__.py +++ b/simulai/models/_pytorch_models/__init__.py @@ -1,19 +1,9 @@ -from ._autoencoder import ( - AutoencoderCNN, - AutoencoderKoopman, - AutoencoderMLP, - AutoencoderVariational, - MultiScaleAutoencoder, -) -from ._deeponet import DeepONet, FlexibleDeepONet, ImprovedDeepONet, ResDeepONet +from ._autoencoder import (AutoencoderCNN, AutoencoderKoopman, AutoencoderMLP, + AutoencoderVariational, MultiScaleAutoencoder) +from ._deeponet import (DeepONet, FlexibleDeepONet, ImprovedDeepONet, + ResDeepONet) +from ._miscellaneous import (ImprovedDenseNetwork, MetaModel, ModelMaker, + MoEPool, MultiNetwork, SplitPool) from ._nif import NIF from ._transformer import Transformer from ._unet import UNet -from ._miscellaneous import ( - ImprovedDenseNetwork, - MetaModel, - ModelMaker, - MultiNetwork, - MoEPool, - SplitPool, -) diff --git a/simulai/models/_pytorch_models/_autoencoder.py b/simulai/models/_pytorch_models/_autoencoder.py index 01f5f160..f18e2927 100644 --- a/simulai/models/_pytorch_models/_autoencoder.py +++ b/simulai/models/_pytorch_models/_autoencoder.py @@ -12,20 +12,15 @@ # See the License for the specific language governing permissions and # limitations under the License. 
-from typing import Optional, Tuple, Union, List +from typing import List, Optional, Tuple, Union import numpy as np import torch from simulai import ARRAY_DTYPE from simulai.regression import ConvolutionalNetwork, DenseNetwork, Linear -from simulai.templates import ( - NetworkTemplate, - as_tensor, - autoencoder_auto, - cnn_autoencoder_auto, - mlp_autoencoder_auto, -) +from simulai.templates import (NetworkTemplate, as_tensor, autoencoder_auto, + cnn_autoencoder_auto, mlp_autoencoder_auto) ######################################## diff --git a/simulai/models/_pytorch_models/_nif.py b/simulai/models/_pytorch_models/_nif.py index 4bcac92f..655b2ff5 100644 --- a/simulai/models/_pytorch_models/_nif.py +++ b/simulai/models/_pytorch_models/_nif.py @@ -21,19 +21,19 @@ from simulai.regression import ConvexDenseNetwork, Linear from simulai.templates import NetworkTemplate, guarantee_device -class WorkflowModule(torch.nn.Module): - - def __init__(self, network: NetworkTemplate=None) -> None: +class WorkflowModule(torch.nn.Module): + def __init__(self, network: NetworkTemplate = None) -> None: super(WorkflowModule, self).__init__() self.network = network - def forward(self, parameters: torch.Tensor=None, - input_tensor: torch.Tensor=None): - self.network.set_parameters(parameters=parameters, requires_grad=False) + def forward( + self, parameters: torch.Tensor = None, input_tensor: torch.Tensor = None + ): + self.network.set_parameters(parameters=parameters, requires_grad=False) - return self.network(input_tensor) + return self.network(input_tensor) class NIF(NetworkTemplate): @@ -69,20 +69,23 @@ def __init__( self.device = self._set_device(devices=devices) self.shape_network = self.to_wrap(entity=shape_network, device=self.device) - self.parameter_network = self.to_wrap(entity=parameter_network, device=self.device) + self.parameter_network = self.to_wrap( + entity=parameter_network, device=self.device + ) - # The number of coefficients to be estimated + # The number of coefficients to be estimated # by the parameter network self.n_shape_parameters = self.shape_network.n_parameters - self.n_inputs_shape = self.shape_network.input_size + self.n_inputs_shape = self.shape_network.input_size self.n_outputs_shape = self.shape_network.output_size self.n_inputs_parameter = self.parameter_network.input_size self.n_outputs_parameter = self.parameter_network.output_size # Latent projection. 
It is, as default choice, a linear # operation - self.latent_projection = Linear(input_size=self.n_outputs_parameter, - output_size=self.n_shape_parameters) + self.latent_projection = Linear( + input_size=self.n_outputs_parameter, output_size=self.n_shape_parameters + ) # The shape network is not trainable, the coefficients are # estimated from the parameter network @@ -128,7 +131,11 @@ def __init__( self.subnetworks = [ net - for net in [self.shape_network, self.parameter_network, self.decoder_network] + for net in [ + self.shape_network, + self.parameter_network, + self.decoder_network, + ] if net is not None ] @@ -148,8 +155,12 @@ def __init__( self.subnetworks_names = ["shape", "parameter"] # Tracing the shape net workflow using TorchScript - sample_parameters_tensor = torch.from_numpy(np.random.rand(self.n_shape_parameters).astype(ARRAY_DTYPE)) - sample_input_tensor = torch.from_numpy(np.random.rand(1_00, self.n_inputs_shape).astype(ARRAY_DTYPE)) + sample_parameters_tensor = torch.from_numpy( + np.random.rand(self.n_shape_parameters).astype(ARRAY_DTYPE) + ) + sample_input_tensor = torch.from_numpy( + np.random.rand(1_00, self.n_inputs_shape).astype(ARRAY_DTYPE) + ) workflow_instance = WorkflowModule(network=self.shape_network) @@ -159,7 +170,7 @@ def __init__( def _forward( self, input_shape: torch.Tensor = None, output_parameter: torch.Tensor = None ) -> torch.Tensor: - """ + """ Args: input_shape (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) @@ -172,7 +183,7 @@ def _forward( # The latent space outputted by the parameter network is projected onto another # high-dimensional space (with dimensionality equivalent to the parameters space) estimated_parameters = self.latent_projection(output_parameter) - + workflow_instance = WorkflowModule(network=self.shape_network) # The shape network is set up using those estimated coefficients @@ -254,7 +265,9 @@ def eval( """ - output_tensor = self.forward(input_shape=shape_data, input_parameter=parameter_data) + output_tensor = self.forward( + input_shape=shape_data, input_parameter=parameter_data + ) return output_tensor.cpu().detach().numpy() @@ -283,4 +296,3 @@ def eval_subnetwork( def summary(self) -> None: print(self) - diff --git a/simulai/models/_pytorch_models/_transformer.py b/simulai/models/_pytorch_models/_transformer.py index 6911dbd9..d6e04748 100644 --- a/simulai/models/_pytorch_models/_transformer.py +++ b/simulai/models/_pytorch_models/_transformer.py @@ -1,21 +1,23 @@ import copy +from typing import Tuple, Union + import numpy as np import torch -from typing import Union, Tuple -from simulai.templates import NetworkTemplate, as_tensor, guarantee_device -from simulai.regression import DenseNetwork, Linear from simulai.activations import TrainableActivation +from simulai.regression import DenseNetwork, Linear +from simulai.templates import NetworkTemplate, as_tensor, guarantee_device + class BaseTemplate(NetworkTemplate): - def __init__(self, device:str="cpu"): + def __init__(self, device: str = "cpu"): """Template used for sharing fundamental methods with the children transformer-like encoders and decoders. 
""" super(BaseTemplate, self).__init__() - self.device = device + self.device = device def _activation_getter( self, activation: Union[str, torch.nn.Module] @@ -51,7 +53,7 @@ def __init__( activation: Union[str, torch.nn.Module] = "relu", mlp_layer: torch.nn.Module = None, embed_dim: Union[int, Tuple] = None, - device:str="cpu", + device: str = "cpu", ) -> None: """Generic transformer encoder. @@ -114,7 +116,7 @@ def __init__( activation: Union[str, torch.nn.Module] = "relu", mlp_layer: torch.nn.Module = None, embed_dim: Union[int, Tuple] = None, - device:str="cpu", + device: str = "cpu", ): """Generic transformer decoder. @@ -238,7 +240,7 @@ def __init__( self.encoder_mlp_layers_list = list() self.decoder_mlp_layers_list = list() - #Determining the kind of device in which the modelwill be executed + # Determining the kind of device in which the modelwill be executed self.device = self._set_device(devices=devices) # Creating independent copies for the MLP layers which will be used @@ -291,8 +293,9 @@ def __init__( self.weights += decoder_d.weights self.add_module(f"decoder_{d}", decoder_d) - - self.final_layer = Linear(input_size=self.embed_dim_decoder, output_size=self.output_dim) + self.final_layer = Linear( + input_size=self.embed_dim_decoder, output_size=self.output_dim + ) self.add_module("final_linear_layer", self.final_layer) # Sending everything to the proper device diff --git a/simulai/models/_pytorch_models/_unet.py b/simulai/models/_pytorch_models/_unet.py index ec3fda6b..111da65c 100644 --- a/simulai/models/_pytorch_models/_unet.py +++ b/simulai/models/_pytorch_models/_unet.py @@ -1,10 +1,11 @@ import copy +from typing import Dict, List, Optional, Tuple, Union + import numpy as np import torch -from typing import Union, List, Tuple, Optional, Dict +from simulai.regression import SLFNN, ConvolutionalNetwork, DenseNetwork from simulai.templates import NetworkTemplate, as_tensor, channels_dim -from simulai.regression import DenseNetwork, SLFNN, ConvolutionalNetwork # A CNN UNet encoder or decodeder is no more than a curved CNN diff --git a/simulai/optimization/__init__.py b/simulai/optimization/__init__.py index 820d65c1..4332b786 100644 --- a/simulai/optimization/__init__.py +++ b/simulai/optimization/__init__.py @@ -19,9 +19,9 @@ from ._builtin import SpaRSA if engine == "pytorch": + from ._adjusters import * from ._builtin_pytorch import BBI from ._losses import * - from ._adjusters import * from ._optimization import Optimizer, ScipyInterface elif engine == "numpy": pass diff --git a/simulai/optimization/_adjusters.py b/simulai/optimization/_adjusters.py index c290b7df..71502ab4 100644 --- a/simulai/optimization/_adjusters.py +++ b/simulai/optimization/_adjusters.py @@ -1,4 +1,5 @@ -from typing import List, Dict, Callable +from typing import Callable, Dict, List + import numpy as np import torch @@ -16,7 +17,7 @@ def _clip_grad(self, grad: torch.Tensor = None, size: int = None) -> torch.Tenso size (int): Number of parameters. Returns: - torch.Tensor: The clipped gradients. + torch.Tensor: The clipped gradients. """ if not isinstance(grad, torch.Tensor): @@ -31,10 +32,10 @@ def _grad( Args: loss (torch.tensor): The current state of the loss function. - operator (NetworkTemplate): The model being trained. + operator (NetworkTemplate): The model being trained. Returns: - torch.Tensor: The gradients evaluated for all the parameters. + torch.Tensor: The gradients evaluated for all the parameters. 
""" if loss.requires_grad: @@ -63,8 +64,8 @@ def _grad( class GeometricMean: def __init__(self): """Simple and naive approach for balancing the loss terms in which - they are rescaled to have the same order of magnitude of the geometric - mean between all the terms. + they are rescaled to have the same order of magnitude of the geometric + mean between all the terms. """ pass @@ -80,7 +81,7 @@ def __call__( Args: residual (List[torch.Tensor]): List containing all the equation-based loss terms. - loss_evaluator (Callable): A Python class or function for evaluating + loss_evaluator (Callable): A Python class or function for evaluating the loss function. """ @@ -98,8 +99,8 @@ def __call__( class ShiftToMax: def __init__(self): """Simple and naive approach for balancing the loss terms in which - they are rescaled to have the same order of magnitude of the maximum value - of all the terms. + they are rescaled to have the same order of magnitude of the maximum value + of all the terms. """ pass @@ -116,7 +117,7 @@ def __call__( Args: residual (List[torch.Tensor]): List containing all the equation-based loss terms. - loss_evaluator (Callable): A Python class or function for evaluating + loss_evaluator (Callable): A Python class or function for evaluating the loss function. """ @@ -145,14 +146,14 @@ def __init__( bound_weight: float = 1.0, extra_data_weight: float = 1.0, ) -> None: - """Learning rate Annealing technique used + """Learning rate Annealing technique used for scaling equation-based loss terms (see https://arxiv.org/abs/2001.04536) Args: alpha (float): 1 - update step. init_weight (float): Initial value for the initial condition weight. bound_weight (float): Initial value for the boundary condition weight. - extra_data_weight (float): Initial value for the weight related to + extra_data_weight (float): Initial value for the weight related to data-drive loss terms. """ @@ -177,12 +178,11 @@ def _coeff_update(self, loss_ref: torch.tensor = None, loss: torch.tensor = None def __call__( self, residual: torch.tensor = None, operator: NetworkTemplate = None, **kwargs ) -> List[torch.tensor]: - """ Args: residual (torch.tensor): Tensor containing the equation residual. - operator (NetworkTemplate): Model being trained. + operator (NetworkTemplate): Model being trained. Returns: List[torch.tensor]: A list containing the updated loss weights. @@ -222,14 +222,13 @@ class InverseDirichletWeights(WeightsEstimator): def __init__( self, alpha: float = None, initial_weights: List[float] = None ) -> None: - - """Inverse Dirichlet technique used + """Inverse Dirichlet technique used for scaling equation-based loss terms (see https://iopscience.iop.org/article/10.1088/2632-2153/ac3712/pdf) Args: alpha (float): 1 - update step. initial_weights (List[float]): List containing the initial states of all the loss - function terms. + function terms. """ super().__init__() @@ -264,7 +263,7 @@ def __call__( Args: residual (List[torch.Tensor]): List of equation-based loss terms. - loss_evaluator (Callable): Python function or class which evaluates the + loss_evaluator (Callable): Python function or class which evaluates the loss function. operator (Callable): Model being trained. 
Returns: diff --git a/simulai/optimization/_builtin.py b/simulai/optimization/_builtin.py index e378daac..6377afcc 100644 --- a/simulai/optimization/_builtin.py +++ b/simulai/optimization/_builtin.py @@ -16,6 +16,7 @@ import numpy as np + # Sparse Regression Algorithm class SpaRSA: def __init__( @@ -33,9 +34,9 @@ def __init__( lambd (float): Quadratic regularization penalty. alpha_0 (float): Update step length. epsilon (float): Error tolerance. - sparsity_tol (float): Sparsity tolerance. + sparsity_tol (float): Sparsity tolerance. use_mean (bool): Use mean for evaluating loss or not. - transform (callable): A transformation to be applied to the data. + transform (callable): A transformation to be applied to the data. """ @@ -115,10 +116,10 @@ def fit( Args: input_data (np.ndarray): Input data for training the model. - target_data (np.ndarray): Target data for training the model. + target_data (np.ndarray): Target data for training the model. Returns: - + """ self.W = self.transform(data=input_data) diff --git a/simulai/optimization/_builtin_pytorch.py b/simulai/optimization/_builtin_pytorch.py index 296356c3..d20ab58f 100644 --- a/simulai/optimization/_builtin_pytorch.py +++ b/simulai/optimization/_builtin_pytorch.py @@ -19,7 +19,6 @@ class BBI(Optimizer): - def __init__( self, params: dict = None, @@ -33,20 +32,20 @@ def __init__( consEn: bool = True, n_fixed_bounces: int = 1, ) -> None: - """Optimizer based on the BBI model of inflation. - - Args: - params (iterable): iterable of parameters to optimize or dicts defining parameter groups - lr (float): learning rate - v0 (float): expected minimum of the potential (Delta V in the paper) - threshold0 (int): threshold for fixed bounces (T0 in the paper) - threshold1 (int): threshold for progress-dependent bounces (T1 in the paper) - deltaEn (float): extra initial energy (delta E in the paper) - consEn (bool): if True enforces energy conservation at every step - n_fixed_bounces (int): number of bounces every T0 iterations (Nb in the paper) - """ - - defaults = dict( + """Optimizer based on the BBI model of inflation. + + Args: + params (iterable): iterable of parameters to optimize or dicts defining parameter groups + lr (float): learning rate + v0 (float): expected minimum of the potential (Delta V in the paper) + threshold0 (int): threshold for fixed bounces (T0 in the paper) + threshold1 (int): threshold for progress-dependent bounces (T1 in the paper) + deltaEn (float): extra initial energy (delta E in the paper) + consEn (bool): if True enforces energy conservation at every step + n_fixed_bounces (int): number of bounces every T0 iterations (Nb in the paper) + """ + + defaults = dict( lr=lr, eps1=eps1, eps2=eps2, @@ -56,26 +55,25 @@ def __init__( deltaEn=deltaEn, consEn=consEn, n_fixed_bounces=n_fixed_bounces, - ) - self.energy = None - self.min_loss = None - self.iteration = 0 - self.deltaEn = deltaEn - self.n_fixed_bounces = n_fixed_bounces - self.consEn = consEn + ) + self.energy = None + self.min_loss = None + self.iteration = 0 + self.deltaEn = deltaEn + self.n_fixed_bounces = n_fixed_bounces + self.consEn = consEn - super(BBI, self).__init__(params, defaults) + super(BBI, self).__init__(params, defaults) def __setstate__(self, state): super(BBI, self).__setstate__(state) def step(self, closure: callable) -> torch.Tensor: - """ Args: closure (callable): A function which encloses the loss - evaluation. + evaluation. Returns: torch.Tensor: The evaluation for the loss function. 
""" diff --git a/simulai/optimization/_losses.py b/simulai/optimization/_losses.py index 24832df7..4b10d21c 100644 --- a/simulai/optimization/_losses.py +++ b/simulai/optimization/_losses.py @@ -13,8 +13,8 @@ # limitations under the License. import sys -from typing import Callable, List, Tuple, Union from time import sleep +from typing import Callable, List, Tuple, Union import numpy as np import torch @@ -23,8 +23,8 @@ from simulai import ARRAY_DTYPE from simulai.io import IntersectingBatches from simulai.models import AutoencoderKoopman, AutoencoderVariational, DeepONet -from simulai.residuals import SymbolicOperator from simulai.optimization._adjusters import AnnealingWeights +from simulai.residuals import SymbolicOperator class LossBasics: @@ -323,7 +323,9 @@ def _data_loss( """ target_split = torch.split(target_data_tensor, self.split_dim, dim=axis) - output_split = torch.split(output_tilde, self.split_dim, dim=axis)[:len(target_split)] + output_split = torch.split(output_tilde, self.split_dim, dim=axis)[ + : len(target_split) + ] data_losses = [ weights[i] @@ -560,7 +562,9 @@ def _data_loss( """ target_split = torch.split(target_data_tensor, self.split_dim, dim=-1) - output_split = torch.split(output_tilde, self.split_dim, dim=-1)[:len(target_split)] + output_split = torch.split(output_tilde, self.split_dim, dim=-1)[ + : len(target_split) + ] data_losses = [ self.loss_evaluator_data((out_split, tgt_split)) @@ -670,7 +674,10 @@ def _residual_loss_adaptive( return [sum(residual_loss)] def _extra_data( - self, input_data: torch.Tensor = None, target_data: torch.Tensor = None, weights :list = None, + self, + input_data: torch.Tensor = None, + target_data: torch.Tensor = None, + weights: list = None, ) -> torch.Tensor: # Evaluating data for the initial condition output_tilde = self.operator(input_data=input_data) @@ -678,7 +685,7 @@ def _extra_data( # Evaluating loss approximation for extra data data_loss = self._data_loss( output_tilde=output_tilde, - target_data_tensor=target_data, + target_data_tensor=target_data, weights=weights, ) @@ -706,23 +713,22 @@ def _boundary_penalisation( def _no_boundary_penalisation( self, boundary_input: dict = None, residual: object = None ) -> List[torch.Tensor]: - """It is used for cases in which no boundary condition is applied - - """ + """It is used for cases in which no boundary condition is applied""" return [torch.Tensor([0.0]).to(self.device) for k in boundary_input.keys()] def _no_boundary( self, boundary_input: dict = None, residual: object = None ) -> List[torch.Tensor]: - """It is used for cases where there are not boundaries - - """ + """It is used for cases where there are not boundaries""" return torch.Tensor([0.0]).to(self.device) def _no_extra_data( - self, input_data: torch.Tensor = None, target_data: torch.Tensor = None, weights: list=None, + self, + input_data: torch.Tensor = None, + target_data: torch.Tensor = None, + weights: list = None, ) -> torch.Tensor: return torch.Tensor([0.0]).to(self.device) diff --git a/simulai/optimization/_optimization.py b/simulai/optimization/_optimization.py index 76c06387..d1c5ff95 100644 --- a/simulai/optimization/_optimization.py +++ b/simulai/optimization/_optimization.py @@ -159,10 +159,10 @@ def __init__( Args: optimizer (str): A name for a PyTorch optimizer. - early_stopping (bool): Early-stopping will be used or not. + early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. - shuffle (bool): Shuffle the dataset or not. 
- lr_decay_scheduler_params (dict): The parameters used for defining + shuffle (bool): Shuffle the dataset or not. + lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). @@ -335,7 +335,9 @@ def _get_lr_decay(self) -> Union[callable, None]: if self.lr_decay_scheduler_params is not None: name = self.lr_decay_scheduler_params.pop("name") try: - self.decay_frequency = self.lr_decay_scheduler_params.pop("decay_frequency") + self.decay_frequency = self.lr_decay_scheduler_params.pop( + "decay_frequency" + ) except: pass lr_class = getattr(torch.optim.lr_scheduler, name) @@ -431,24 +433,29 @@ def _get_ondisk_data( ) -> torch.Tensor: indices = np.sort(indices) - ondisk_formats = {np.ndarray: self._convert_ondisk_data_array, - dict: self._convert_ondisk_data_dict} + ondisk_formats = { + np.ndarray: self._convert_ondisk_data_array, + dict: self._convert_ondisk_data_dict, + } data = dataset(indices=indices) return ondisk_formats.get(type(data))(data=data) def _convert_ondisk_data_array( - self, data: np.ndarray=None, + self, + data: np.ndarray = None, ) -> torch.Tensor: - return torch.from_numpy(data.astype(ARRAY_DTYPE)) def _convert_ondisk_data_dict( - self, data: np.ndarray=None, + self, + data: np.ndarray = None, ) -> torch.Tensor: - - return {key: torch.from_numpy((value).astype(ARRAY_DTYPE)) for key, value in data.items()} + return { + key: torch.from_numpy((value).astype(ARRAY_DTYPE)) + for key, value in data.items() + } # Preparing the batches (converting format and moving to the correct device) # in a single batch optimization loop @@ -477,22 +484,13 @@ def _batchwise_make_input_data( # When the 'input data' is just a pointer for a lazzy dataset elif callable(input_data): - - data = self.get_data( - dataset=input_data, indices=batch_indices - ) + data = self.get_data(dataset=input_data, indices=batch_indices) if type(data) == torch.Tensor: - - input_data_dict = { - self.input_data_name: data.to(device) - } + input_data_dict = {self.input_data_name: data.to(device)} else: - input_data_dict = { - key: item.to(device) - for key, item in data.items() - } - + input_data_dict = {key: item.to(device) for key, item in data.items()} + # The rest of the possible cases else: input_data_dict = { @@ -632,7 +630,7 @@ def _batchwise_optimization_loop( # single optimization step indices = samples_permutation[ibatch] - # The input batch usually requires more pre-processing and + # The input batch usually requires more pre-processing and # specifications input_batch = self._batchwise_make_input_data( input_data, device=device, batch_indices=indices @@ -654,13 +652,13 @@ def _batchwise_optimization_loop( # A single optimization step self.optimizer_instance.step(loss_function) - # Writing the training information to a Tensorboard file + # Writing the training information to a Tensorboard file # (if it is required) self.summary_writer( loss_states=loss_instance.loss_states, epoch=b_epoch ) - # Checkpoint the model + # Checkpoint the model self.checkpoint_handler( model=op, epoch=b_epoch, **self.checkpoint_params ) @@ -707,21 +705,21 @@ def fit( Args: op (NetworkTemplate): The model which will be trained - input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as - input for the model. 
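
# The ondisk_formats mapping above is a small type-dispatch table: the type of
# the lazily loaded batch selects its converter. The same pattern in isolation
# (the converter names and the float64 dtype are assumptions for the sketch):
import numpy as np
import torch

def convert_array(data=None):
    return torch.from_numpy(data.astype("float64"))

def convert_dict(data=None):
    return {key: torch.from_numpy(value.astype("float64"))
            for key, value in data.items()}

converters = {np.ndarray: convert_array, dict: convert_dict}

batch = {"u": np.zeros((4, 2)), "t": np.ones((4, 1))}
tensors = converters.get(type(batch))(data=batch)
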
+ input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as + input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). - n_epochs (int): Number of epochs for the optimization process. + n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. - distributed (bool): Use distributed (multi-node) training or not. + distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. """ - + # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None @@ -1030,12 +1028,12 @@ def _stack_and_convert_parameters( ) -> np.ndarray: """ It produces a stack of all the model parameters. - + Args: - parameters (List[Union[torch.Tensor, np.ndarray]]): A list containing all the + parameters (List[Union[torch.Tensor, np.ndarray]]): A list containing all the model parameters in their original shapes. Returns: - np.ndarray: A stack (single vertical array) of all the model parameters. + np.ndarray: A stack (single vertical array) of all the model parameters. """ return np.hstack( @@ -1065,20 +1063,18 @@ def _update_and_set_parameters(self, parameters: np.ndarray) -> None: parameter.data.copy_(operators[opi]) def _exec_kwargs_forward(self, input_data: dict = None): - """It executes the forward pass for the model when it receives more than one input. Args: - input_data dict: Data to be passed to the model. + input_data dict: Data to be passed to the model. """ return self.fun.forward(**input_data) def _exec_forward(self, input_data: Union[np.ndarray, torch.Tensor] = None): - """It executes the forward pass for the model. Args: - input_data (Union[np.ndarray, torch.Tensor]): Data to be passed to the model. + input_data (Union[np.ndarray, torch.Tensor]): Data to be passed to the model. """ @@ -1088,9 +1084,9 @@ def _fun_num(self, parameters: np.ndarray) -> Tuple[float]: """ Args: - parameters (np.ndarray): The stacked parameters defined for the model. + parameters (np.ndarray): The stacked parameters defined for the model. Returns: - Tuple[float]: The loss(es) defined for the optimization process. + Tuple[float]: The loss(es) defined for the optimization process. """ self._update_and_set_parameters(parameters) @@ -1106,8 +1102,8 @@ def _fun(self, parameters: np.ndarray) -> Tuple[float, np.ndarray]: parameters (np.ndarray): The stack of all the trainable parameters for the model. Returns: - Tuple[float, np.ndarray]: A tuple containing the value for the loss function and - the array of gradients for the model parameters. + Tuple[float, np.ndarray]: A tuple containing the value for the loss function and + the array of gradients for the model parameters. """ # Setting the new values for the model parameters @@ -1137,9 +1133,9 @@ def fit( """ Args: - input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as - input for the model. 
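
# _stack_and_convert_parameters above flattens every parameter tensor into one
# vector, which is what lets scipy.optimize drive a torch model as a plain
# f(x) -> loss. The np.hstack idiom on its own (toy model, not ScipyInterface):
import numpy as np
import torch

model = torch.nn.Linear(3, 2)
flat = np.hstack(
    [param.detach().numpy().flatten() for param in model.parameters()]
)
# One flat vector: 3 * 2 weights plus 2 biases.
assert flat.shape == (8,)
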
- target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. + input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as + input for the model. + target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. """ parameters_0 = self._stack_and_convert_parameters(self.state_0) diff --git a/simulai/parallel.py b/simulai/parallel.py index 4c18f422..89c55227 100644 --- a/simulai/parallel.py +++ b/simulai/parallel.py @@ -33,7 +33,7 @@ class PipelineMPI: """PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. - + """ def __init__( @@ -68,7 +68,7 @@ def _check_kwargs_consistency(self, kwargs: dict = None) -> int: Returns: int: Length of the batch sent for each worker. - + """ types = [type(value) for value in kwargs.values()] @@ -102,7 +102,7 @@ def _split_kwargs( kwargs_batch: A dictionary containing the kwargs to be sent for each worker. batch_size: The batch size, which corresponds to the number of elements to be sent for each worker. - + """ # Decrement rank and size by 1, because they are usually 0-indexed in Python @@ -155,7 +155,7 @@ def _exec_wrapper(self, kwargs: dict, total_size: int) -> None: Args: kwargs (dict): A dictionary containing kwargs for the worker. total_size (int): The total number of elements. - + """ comm = MPI.COMM_WORLD @@ -214,9 +214,7 @@ def _exec_wrapper(self, kwargs: dict, total_size: int) -> None: @property def success(self) -> bool: - """It returns True if the entire process worked without issues. - - """ + """It returns True if the entire process worked without issues.""" return all(self.status) @@ -225,7 +223,7 @@ def run(self, kwargs: dict = None) -> None: Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) - + """ comm = MPI.COMM_WORLD diff --git a/simulai/regression/__init__.py b/simulai/regression/__init__.py index a338ff49..b1275a21 100644 --- a/simulai/regression/__init__.py +++ b/simulai/regression/__init__.py @@ -16,7 +16,6 @@ from ._affine import AffineMapping from ._elm import ELM - # No back-propagation mechanisms from ._esn import DeepEchoStateNetwork, EchoStateNetwork, WideEchoStateNetwork from ._extended_opinf import ExtendedOpInf @@ -28,14 +27,8 @@ if engine == "pytorch": from ._pytorch._conv import ConvolutionalNetwork, ResConvolutionalNetwork - from ._pytorch._dense import ( - SLFNN, - ConvexDenseNetwork, - DenseNetwork, - Linear, - ResDenseNetwork, - ShallowNetwork, - ) + from ._pytorch._dense import (SLFNN, ConvexDenseNetwork, DenseNetwork, + Linear, ResDenseNetwork, ShallowNetwork) from ._pytorch._koopman import AutoEncoderKoopman, KoopmanNetwork from ._pytorch._numpy import LinearNumpy from ._pytorch._opinf import OpInfNetwork diff --git a/simulai/regression/_pytorch/_conv.py b/simulai/regression/_pytorch/_conv.py index cea4ab7c..f3c7f747 100644 --- a/simulai/regression/_pytorch/_conv.py +++ b/simulai/regression/_pytorch/_conv.py @@ -13,7 +13,7 @@ # limitations under the License. import importlib -from typing import List, Union, Optional +from typing import List, Optional, Union import numpy as np import torch @@ -56,7 +56,7 @@ def __init__( flatten: bool = False, name: str = None, ) -> None: - """ Basic Convolutional network. + """Basic Convolutional network. Args: layers (List[dict]): List of layer configuration dictionaries. @@ -65,8 +65,8 @@ def __init__( the convolutional network itself. 
case (str): The dimensionality case, '1d', '2d' or '3d'. last_activation (str): The activation for the last layer. - transpose (bool): Use transposed convolutions or not. - flatten (bool): Flatten the output (remove unitary layers) or not. + transpose (bool): Use transposed convolutions or not. + flatten (bool): Flatten the output (remove unitary layers) or not. name (str): A name for the network model. """ @@ -128,7 +128,7 @@ def __init__( def forward( self, input_data: Union[torch.Tensor, np.ndarray] = None ) -> torch.Tensor: - """ Convolutional network forward method. + """Convolutional network forward method. Args: input_data (Union[torch.Tensor, np.ndarray]): The input datasets. @@ -155,13 +155,13 @@ def __init__( Args: - stages (List[list]): List containing the configuration for each + stages (List[list]): List containing the configuration for each stage (seen as a subnetwork). activations (List[list]): Activations for all the layers. case (str): The dimensionality case, '1d', '2d' or '3d'. last_activation (str): The activation for the last layer. - transpose (bool): Use transposed convolutions or not. - flatten (bool): Flatten the output (remove unitary layers) or not. + transpose (bool): Use transposed convolutions or not. + flatten (bool): Flatten the output (remove unitary layers) or not. name (str): A name for the network model. """ @@ -230,7 +230,7 @@ def __init__( def forward( self, input_data: Union[torch.Tensor, np.ndarray] = None ) -> torch.Tensor: - """ Convolutional network forward method. + """Convolutional network forward method. Args: input_data (Union[torch.Tensor, np.ndarray]): The input datasets. diff --git a/simulai/rom/__init__.py b/simulai/rom/__init__.py index 79b3281e..fab93e88 100644 --- a/simulai/rom/__init__.py +++ b/simulai/rom/__init__.py @@ -16,7 +16,8 @@ assert engine is not None, "The variable engine was not defined." 
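
# The Union[torch.Tensor, np.ndarray] signatures in the forward methods above
# usually reduce to a single coercion line; a generic sketch (TinyNet is a
# hypothetical stand-in, not a simulai network):
import numpy as np
import torch

class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(2, 1)

    def forward(self, input_data=None):
        # Coerce numpy arrays so both input types behave identically.
        if isinstance(input_data, np.ndarray):
            input_data = torch.from_numpy(input_data.astype(np.float32))
        return self.layer(input_data)

out = TinyNet()(np.zeros((4, 2)))
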
-from ._rom import DMD, GPOD, HOSVD, IPOD, POD, QQM, ROM, ByPassROM, IByPass, ParallelSVD
+from ._rom import (DMD, GPOD, HOSVD, IPOD, POD, QQM, ROM, ByPassROM, IByPass,
+                   ParallelSVD)

 if engine == "pytorch":
     pass
diff --git a/simulai/rom/_rom.py b/simulai/rom/_rom.py
index 189106dc..00d89817 100644
--- a/simulai/rom/_rom.py
+++ b/simulai/rom/_rom.py
@@ -789,7 +789,7 @@ def __init__(self, config=None):

         Args:
             config: (Default value = None)
-        
+
         """
         super().__init__()
         for key, value in config.items():
diff --git a/simulai/templates/__init__.py b/simulai/templates/__init__.py
index 194a854b..2fa6a321 100644
--- a/simulai/templates/__init__.py
+++ b/simulai/templates/__init__.py
@@ -14,24 +14,14 @@

 from simulai import engine

-from ._templates import (
-    NetworkInstanceGen,
-    ReservoirComputing,
-    autoencoder_auto,
-    cnn_autoencoder_auto,
-    mlp_autoencoder_auto,
-)
+from ._templates import (NetworkInstanceGen, ReservoirComputing,
+                         autoencoder_auto, cnn_autoencoder_auto,
+                         mlp_autoencoder_auto)

 if engine == "pytorch":
-    from ._pytorch_network import (
-        ConvNetworkTemplate,
-        HyperTrainTemplate,
-        NetworkTemplate,
-        as_array,
-        as_tensor,
-        channels_dim,
-        guarantee_device,
-    )
+    from ._pytorch_network import (ConvNetworkTemplate, HyperTrainTemplate,
+                                   NetworkTemplate, as_array, as_tensor,
+                                   channels_dim, guarantee_device)
 elif engine == "numpy":
     pass
 else:
diff --git a/simulai/templates/_pytorch_network.py b/simulai/templates/_pytorch_network.py
index a49ccfa5..05c107cd 100644
--- a/simulai/templates/_pytorch_network.py
+++ b/simulai/templates/_pytorch_network.py
@@ -31,7 +31,7 @@ def __init__(self, name: str = None, devices: str = None) -> None:

         Args:
             name (str): Name for the neural network model.
-            devices (str): Kind of device in which the model will run, 
+            devices (str): Kind of device in which the model will run,
                 'cpu' or 'gpu'.

         """
@@ -64,26 +64,26 @@ def __init__(self, name: str = None, devices: str = None) -> None:
     @property
     def weights_l2(self) -> torch.Tensor:
-        """It evaluates the global L^2 norm of all the model coefficients. 
+        """It evaluates the global L^2 norm of all the model coefficients.

         Returns:
             torch.Tensor: A tensor containing the value of this norm.
-        
+
         """
         return sum([torch.norm(weight, p=2) for weight in self.weights])

     @property
     def weights_l1(self) -> torch.Tensor:
-        """It evaluates the global L^1 norm of all the model coefficients. 
+        """It evaluates the global L^1 norm of all the model coefficients.

         Returns:
             torch.Tensor: A tensor containing the value of this norm.
-        
+
         """
         return sum([torch.norm(weight, p=1) for weight in self.weights])

     @property
     def n_parameters(self) -> int:
-        """It evaluates the total number of parameters for the model.eval 
+        """It evaluates the total number of parameters for the model.

         Returns:
             int: The total number of parameters for the model.
""" @@ -127,7 +127,10 @@ def _get_from_guest(self, **kwargs) -> None: # Getting up activation if it exists def _get_operation( - self, operation: str = None, is_activation: bool = True, **kwargs, + self, + operation: str = None, + is_activation: bool = True, + **kwargs, ) -> callable: mod_items = dir(self.engine) mod_items_lower = [item.lower() for item in mod_items] @@ -174,14 +177,18 @@ def _setup_activations( activation_op = self._get_operation(operation=activation) if isinstance(activation_op, simulact.TrainableActivation): - - activations_list = [self._get_operation(operation=activation, - is_activation=True, device=self.device) - for i in range(n_layers - 1)] + activations_list = [ + self._get_operation( + operation=activation, is_activation=True, device=self.device + ) + for i in range(n_layers - 1) + ] else: - activations_list = [self._get_operation(operation=activation) - for i in range(n_layers - 1)] + activations_list = [ + self._get_operation(operation=activation) + for i in range(n_layers - 1) + ] return ( activations_list @@ -199,7 +206,7 @@ def _setup_activations( ) activations_list.append(activation_op) - + return activations_list, activation elif isinstance(activation, torch.nn.Module): @@ -221,13 +228,12 @@ def _setup_activations( activation_op.setup(device=self.device_type) activations_list.append(activation_op) - + return activations_list, activation else: raise Exception( - "The activation format," - f"{type(activation)} is not supported." + "The activation format," f"{type(activation)} is not supported." ) # Instantiating all the linear layers. @@ -351,33 +357,38 @@ def _set_parameter_from_tensor(self, data): return data # Setting up values for the model parameters. - def set_parameters(self, parameters:Union[torch.Tensor, np.ndarray]=None, requires_grad=True) -> None: + def set_parameters( + self, parameters: Union[torch.Tensor, np.ndarray] = None, requires_grad=True + ) -> None: """It overwrite the current parameters values with new ones. Args: parameters (List[torch.Tensor]): List of new values to overwrite the - current parameters. + current parameters. """ # Determining the kind of data structure to be converted from - struct_converter = { - np.ndarray : self._set_parameter_from_array, - torch.Tensor : self._set_parameter_from_tensor - }.get(type(parameters)) + struct_converter = { + np.ndarray: self._set_parameter_from_array, + torch.Tensor: self._set_parameter_from_tensor, + }.get(type(parameters)) for ll, layer in enumerate(self.layers_map): - self.layers[ll].weight = Parameter( data=struct_converter( - parameters[self.stitch_idx[layer[0]].flatten()].reshape(self.shapes_layers[ll][0]) + parameters[self.stitch_idx[layer[0]].flatten()].reshape( + self.shapes_layers[ll][0] + ) ), requires_grad=requires_grad, ) self.layers[ll].bias = Parameter( data=struct_converter( - parameters[self.stitch_idx[layer[1]].flatten()].reshape(self.shapes_layers[ll][1]) + parameters[self.stitch_idx[layer[1]].flatten()].reshape( + self.shapes_layers[ll][1] + ) ), requires_grad=requires_grad, ) @@ -385,7 +396,7 @@ def set_parameters(self, parameters:Union[torch.Tensor, np.ndarray]=None, requir # Detaching parameters from the backpropagation pipeline def detach_parameters(self) -> None: """Remove the parameters for the PyTorch graph, - it means that they will not be trainable. + it means that they will not be trainable. 
""" for param in self.parameters(): param.requires_grad = False @@ -396,11 +407,11 @@ def eval(self, input_data: Union[np.ndarray, torch.Tensor] = None) -> np.ndarray """It used the model to perform evaluations. Args: - input_data (Union[np.ndarray, torch.Tensor]): The input data used for the + input_data (Union[np.ndarray, torch.Tensor]): The input data used for the model evaluation. Returns: - np.ndarray: The result of that evaluation. + np.ndarray: The result of that evaluation. """ output_tensor = self.forward(input_data=input_data) @@ -551,8 +562,8 @@ def __init__(self, name: str = None, flatten: bool = None) -> None: """A basic template for convolutional neural networks. Args: - name (str): A name for the neural network model. - flatten (bool): Flatten the output or not. + name (str): A name for the neural network model. + flatten (bool): Flatten the output or not. """ super(ConvNetworkTemplate, self).__init__() @@ -753,7 +764,7 @@ def summary( """ Args: - input_data (Union[torch.Tensor, np.ndarray]): An input data used for + input_data (Union[torch.Tensor, np.ndarray]): An input data used for helping to construct the model summary. input_shape (list): When input_data is not provided, a shape for it can be used instead. diff --git a/simulai/templates/_templates.py b/simulai/templates/_templates.py index dff62c3a..7cc04d4d 100644 --- a/simulai/templates/_templates.py +++ b/simulai/templates/_templates.py @@ -17,8 +17,8 @@ from typing import Optional, Tuple, Union import numpy as np -import torch import scipy.sparse as sparse +import torch from simulai.abstract import Regression diff --git a/simulai/tokens.py b/simulai/tokens.py index e433539a..3641cedc 100644 --- a/simulai/tokens.py +++ b/simulai/tokens.py @@ -12,8 +12,9 @@ # See the License for the specific language governing permissions and # limitations under the License. -import sympy from typing import Union + +import sympy import torch # Token used for representing the operator differentiation diff --git a/simulai/utilities/__init__.py b/simulai/utilities/__init__.py index 2e35d1b3..9ea5b724 100644 --- a/simulai/utilities/__init__.py +++ b/simulai/utilities/__init__.py @@ -14,5 +14,5 @@ from ._makedirs_to_file import makedirs_to_file from ._tmp_dir import make_temp_directory -from .opinf_deviation import OpInfDeviation from ._visualization import view_api +from .opinf_deviation import OpInfDeviation diff --git a/simulai/utilities/_visualization.py b/simulai/utilities/_visualization.py index 9905738b..f16564da 100644 --- a/simulai/utilities/_visualization.py +++ b/simulai/utilities/_visualization.py @@ -1,4 +1,5 @@ from typing import Dict, Union + import numpy as np import torch diff --git a/simulai/workflows/__init__.py b/simulai/workflows/__init__.py index 557bafec..50128f27 100644 --- a/simulai/workflows/__init__.py +++ b/simulai/workflows/__init__.py @@ -13,15 +13,12 @@ # limitations under the License. 
from ._cloud_object_storage import CloudObjectStorage +from ._deeponet_workflows import ConvDeepONet from ._esn_modelpool_train import ( - ObjectiveESNIndependent, - define_reservoir_configs_for_affine_training, + ObjectiveESNIndependent, define_reservoir_configs_for_affine_training, optuna_assess_best_joint_solution_ESNIndependent, - optuna_assess_best_solution_ESNIndependent, - optuna_objectiveESNIndependent, -) + optuna_assess_best_solution_ESNIndependent, optuna_objectiveESNIndependent) from ._extrapolation import StepwiseExtrapolation from ._h5_comparison import compute_datasets_to_reference_norm from ._h5_ipod import dataset_ipod, pipeline_projection_error from ._parametric_hyperopt import ParamHyperOpt -from ._deeponet_workflows import ConvDeepONet diff --git a/simulai/workflows/_deeponet_workflows.py b/simulai/workflows/_deeponet_workflows.py index 1c3bb5d3..11c37de8 100644 --- a/simulai/workflows/_deeponet_workflows.py +++ b/simulai/workflows/_deeponet_workflows.py @@ -12,7 +12,7 @@ # See the License for the specific language governing permissions and # limitations under the License. -from typing import Optional, Tuple, List, Union +from typing import List, Optional, Tuple, Union from simulai.models import DeepONet from simulai.regression import ConvolutionalNetwork, DenseNetwork diff --git a/tests/config.py b/tests/config.py index b07f366d..4fd70864 100644 --- a/tests/config.py +++ b/tests/config.py @@ -19,6 +19,7 @@ # Author: Joao Lucas S. Almeida import os + import torch diff --git a/tests/devices/test_network_devices.py b/tests/devices/test_network_devices.py index e9db72b0..5422e419 100644 --- a/tests/devices/test_network_devices.py +++ b/tests/devices/test_network_devices.py @@ -56,107 +56,105 @@ def u(self, t, x, L: float = None, t_max: float = None) -> np.ndarray: ) * np.cos(5 * np.pi * (t / t_max - 1 / 2) ** 2) def test_densenetwork_optimization_and_persistency(self) -> None: + for architecture in ["DenseNetwork", "ResDenseNetwork", "ImprovedDenseNetwork"]: - print(f"Testing architecture: {architecture}.") - for DEVICE in ["cpu", "gpu", "tpu", None]: - try: - net = model_convex() + print(f"Testing architecture: {architecture}.") - lr = 5e-5 - n_epochs = 1_00 - n_train = 2_000 + for DEVICE in ["cpu", "gpu", None]: + + net = model_convex() - t_max = 10 - L = 5 - K = 512 - N = 10_000 + lr = 5e-5 + n_epochs = 1_00 + n_train = 2_000 - x_interval = [0, L] - time_interval = [0, t_max] + t_max = 10 + L = 5 + K = 512 + N = 10_000 - x = np.linspace(*x_interval, K) - t = np.linspace(*time_interval, N) + x_interval = [0, L] + time_interval = [0, t_max] - T, X = np.meshgrid(t, x, indexing="ij") - output_data = self.u(T, X, L=L, t_max=t_max) + x = np.linspace(*x_interval, K) + t = np.linspace(*time_interval, N) - positions = np.stack( - [X[::100].flatten(), T[::100].flatten()], axis=1 - ) - positions = 2 * positions / np.array([L, t_max]) - 1 + T, X = np.meshgrid(t, x, indexing="ij") + output_data = self.u(T, X, L=L, t_max=t_max) - optimizer_config = {"lr": lr} + positions = np.stack( + [X[::100].flatten(), T[::100].flatten()], axis=1 + ) + positions = 2 * positions / np.array([L, t_max]) - 1 - n_t, n_x = output_data.shape + optimizer_config = {"lr": lr} - x_i = np.random.randint(0, n_x, size=(n_train, 1)) - t_i = np.random.randint(0, n_t, size=(n_train, 1)) + n_t, n_x = output_data.shape - input_train = ( - 2 * np.hstack([x[x_i], t[t_i]]) / np.array([L, t_max]) - 1 - ) - output_train = output_data[t_i, x_i] + x_i = np.random.randint(0, n_x, size=(n_train, 1)) + t_i = 
np.random.randint(0, n_t, size=(n_train, 1)) - # Configuring Optimizer - params = {"lambda_1": 0.0, "lambda_2": 1e-14} + input_train = ( + 2 * np.hstack([x[x_i], t[t_i]]) / np.array([L, t_max]) - 1 + ) + output_train = output_data[t_i, x_i] - optimizer = Optimizer("adam", params=optimizer_config) + # Configuring Optimizer + params = {"lambda_1": 0.0, "lambda_2": 1e-14} - optimizer.fit( - op=net, - input_data=input_train, - target_data=output_train, - n_epochs=n_epochs, - loss="rmse", - params=params, - batch_size=1_00, - device=DEVICE, - use_jit=True, - ) + optimizer = Optimizer("adam", params=optimizer_config) - # First evaluation - approximated_data = net.eval(input_data=positions) + optimizer.fit( + op=net, + input_data=input_train, + target_data=output_train, + n_epochs=n_epochs, + loss="rmse", + params=params, + batch_size=1_00, + device=DEVICE, + use_jit=True, + ) - l2_norm = L2Norm() + # First evaluation + approximated_data = net.eval(input_data=positions) - projection_error = 100 * l2_norm( - data=approximated_data, - reference_data=output_data[::100], - relative_norm=True, - ) + l2_norm = L2Norm() - print(f"Projection error: {projection_error} %") + projection_error = 100 * l2_norm( + data=approximated_data, + reference_data=output_data[::100], + relative_norm=True, + ) - # Saving model - print("Saving model.") - saver = SPFile(compact=False) - saver.write( - save_dir="/tmp", - name="data_representation", - model=net, - template=model_convex, - ) + print(f"Projection error: {projection_error} %") - # Testing to reload from disk - saver = SPFile(compact=False) - net_reload = saver.read(model_path="/tmp/data_representation") + # Saving model + print("Saving model.") + saver = SPFile(compact=False) + saver.write( + save_dir="/tmp", + name="data_representation", + model=net, + template=model_convex, + ) - # Post-processing - approximated_data = net_reload.eval(input_data=positions) - approximated_data = approximated_data.reshape(-1, K) + # Testing to reload from disk + saver = SPFile(compact=False) + net_reload = saver.read(model_path="/tmp/data_representation") - l2_norm = L2Norm() + # Post-processing + approximated_data = net_reload.eval(input_data=positions) + approximated_data = approximated_data.reshape(-1, K) - projection_error = 100 * l2_norm( - data=approximated_data, - reference_data=output_data[::100], - relative_norm=True, - ) + l2_norm = L2Norm() - print(f"Projection error: {projection_error} %") + projection_error = 100 * l2_norm( + data=approximated_data, + reference_data=output_data[::100], + relative_norm=True, + ) - except Exception: - assert DEVICE == "tpu" + print(f"Projection error: {projection_error} %") - print("Device not supported.") diff --git a/tests/file/test_spfile.py b/tests/file/test_spfile.py index 23eb8cb8..503f8274 100644 --- a/tests/file/test_spfile.py +++ b/tests/file/test_spfile.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/math/test_metrics.py b/tests/math/test_metrics.py index b26bcacb..2ee568d4 100644 --- a/tests/math/test_metrics.py +++ b/tests/math/test_metrics.py @@ -19,12 +19,8 @@ import numpy as np import pytest -from simulai.metrics import ( - FeatureWiseErrorNorm, - L2Norm, - MemorySizeEval, - SampleWiseErrorNorm, -) +from simulai.metrics import (FeatureWiseErrorNorm, L2Norm, MemorySizeEval, + SampleWiseErrorNorm) from simulai.utilities import make_temp_directory diff --git a/tests/metrics/test_mahalanobis.py 
b/tests/metrics/test_mahalanobis.py index 5d799dcf..b9a3205f 100644 --- a/tests/metrics/test_mahalanobis.py +++ b/tests/metrics/test_mahalanobis.py @@ -1,4 +1,5 @@ from unittest import TestCase + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/network/test_conv_1d.py b/tests/network/test_conv_1d.py index 84bba8d1..5a7b8097 100644 --- a/tests/network/test_conv_1d.py +++ b/tests/network/test_conv_1d.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/network/test_conv_2d.py b/tests/network/test_conv_2d.py index f03b180b..cf4ba9d8 100644 --- a/tests/network/test_conv_2d.py +++ b/tests/network/test_conv_2d.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/network/test_deeponet.py b/tests/network/test_deeponet.py index 5e6bd73e..69558a69 100644 --- a/tests/network/test_deeponet.py +++ b/tests/network/test_deeponet.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. +from typing import Optional, Union from unittest import TestCase -from typing import Union, Optional + import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/network/test_flexible_deeponet.py b/tests/network/test_flexible_deeponet.py index 9e012933..96a587f3 100644 --- a/tests/network/test_flexible_deeponet.py +++ b/tests/network/test_flexible_deeponet.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest import TestCase from typing import Optional +from unittest import TestCase + import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/network/test_improved_deeponet.py b/tests/network/test_improved_deeponet.py index b4221008..7d305fe2 100644 --- a/tests/network/test_improved_deeponet.py +++ b/tests/network/test_improved_deeponet.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. -from unittest import TestCase from typing import Optional +from unittest import TestCase + import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/network/test_nif.py b/tests/network/test_nif.py index e7fa6510..3fba4c92 100644 --- a/tests/network/test_nif.py +++ b/tests/network/test_nif.py @@ -12,9 +12,11 @@ # See the License for the specific language governing permissions and # limitations under the License. 
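
# Condensed from the device test above: the Optimizer training call as the
# tests drive it. model_convex is the test module's own factory, assumed
# importable here, and the data shapes are illustrative, not the test's values.
import numpy as np
from simulai.optimization import Optimizer

net = model_convex()  # factory defined alongside the test

input_train = np.random.rand(2_000, 2)
output_train = np.random.rand(2_000, 1)

optimizer = Optimizer("adam", params={"lr": 5e-5})
optimizer.fit(
    op=net,
    input_data=input_train,
    target_data=output_train,
    n_epochs=100,
    loss="rmse",
    params={"lambda_1": 0.0, "lambda_2": 1e-14},
    batch_size=100,
    device="cpu",
)
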
+from typing import Optional, Union from unittest import TestCase -from typing import Union, Optional + import numpy as np + from tests.config import configure_dtype torch = configure_dtype() @@ -28,8 +30,8 @@ def model( n_outputs: int = 2, device: Optional[str] = "cpu", ): - from simulai.regression import DenseNetwork from simulai.models import NIF + from simulai.regression import DenseNetwork n_inputs = 4 n_outputs = n_outputs @@ -73,6 +75,7 @@ def model( return net + class TestNIF(TestCase): def setUp(self) -> None: pass @@ -95,7 +98,9 @@ def test_nif_forward(self): data_shape = torch.rand(1_000, 1) data_parameters = torch.rand(1_000, 4) - output = net.forward(input_shape=data_shape, input_parameter=data_parameters) + output = net.forward( + input_shape=data_shape, input_parameter=data_parameters + ) assert output.shape[1] == 2, "The network output is not like expected." output = net.eval_subnetwork(name="parameter", input_data=data_parameters) @@ -134,5 +139,3 @@ def test_nif_train(self): output = net.forward(input_shape=data_shape, input_parameter=data_parameter) assert output.shape[1] == 1, "The network output is not like expected." - - diff --git a/tests/network/test_template_gen.py b/tests/network/test_template_gen.py index e7e8a5b9..dddeb3a4 100644 --- a/tests/network/test_template_gen.py +++ b/tests/network/test_template_gen.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/network/test_transformer.py b/tests/network/test_transformer.py index eccc0f22..e383a4a7 100644 --- a/tests/network/test_transformer.py +++ b/tests/network/test_transformer.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np + from tests.config import configure_dtype torch = configure_dtype() @@ -24,8 +25,8 @@ from simulai import ARRAY_DTYPE from simulai.file import SPFile -from simulai.optimization import Optimizer from simulai.models import Transformer +from simulai.optimization import Optimizer from simulai.regression import DenseNetwork DEVICE = configure_device() @@ -81,7 +82,6 @@ def test_instantiate(self): print(estimated_output_data.shape) - def test_instantiate_inputs_dif_outputs(self): num_heads = 4 embed_dim = 128 @@ -111,7 +111,6 @@ def test_instantiate_inputs_dif_outputs(self): "name": "mlp_layer", } - # Instantiating and training the surrogate model transformer = Transformer( num_heads_encoder=num_heads, diff --git a/tests/network/test_unet.py b/tests/network/test_unet.py index 298cd756..d0552215 100644 --- a/tests/network/test_unet.py +++ b/tests/network/test_unet.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git a/tests/normalization/test_normalization.py b/tests/normalization/test_normalization.py index 86dbd2a2..3046c37f 100644 --- a/tests/normalization/test_normalization.py +++ b/tests/normalization/test_normalization.py @@ -2,7 +2,8 @@ import numpy as np -from simulai.normalization import UnitaryNormalization, UnitarySymmetricalNormalization +from simulai.normalization import (UnitaryNormalization, + UnitarySymmetricalNormalization) class TestNormalization(TestCase): diff --git a/tests/parallelism/test_modelpool_esn.py b/tests/parallelism/test_modelpool_esn.py index f3123cea..1ae0c071 100644 --- a/tests/parallelism/test_modelpool_esn.py +++ b/tests/parallelism/test_modelpool_esn.py @@ -20,7 +20,6 @@ from unittest import TestCase import numpy as np - # import optuna 
import optuna @@ -28,12 +27,10 @@ from simulai.regression import EchoStateNetwork from simulai.special import reservoir_generator from simulai.utilities import make_temp_directory -from simulai.workflows import ( - ObjectiveESNIndependent, - define_reservoir_configs_for_affine_training, - optuna_assess_best_solution_ESNIndependent, - optuna_objectiveESNIndependent, -) +from simulai.workflows import (ObjectiveESNIndependent, + define_reservoir_configs_for_affine_training, + optuna_assess_best_solution_ESNIndependent, + optuna_objectiveESNIndependent) class TestModelPoolESN(TestCase): diff --git a/tests/preprocessing/test_batch_copy.py b/tests/preprocessing/test_batch_copy.py index 0b849f62..e2f16715 100644 --- a/tests/preprocessing/test_batch_copy.py +++ b/tests/preprocessing/test_batch_copy.py @@ -13,7 +13,6 @@ # limitations under the License. import os - # (C) Copyright IBM Corporation 2017, 2018, 2019 # U.S. Government Users Restricted Rights: Use, duplication or disclosure restricted # by GSA ADP Schedule Contract with IBM Corp. diff --git a/tests/preprocessing/test_meanevaluation.py b/tests/preprocessing/test_meanevaluation.py index 0d688d0b..7b5e7d8b 100644 --- a/tests/preprocessing/test_meanevaluation.py +++ b/tests/preprocessing/test_meanevaluation.py @@ -13,7 +13,6 @@ # limitations under the License. import os - # (C) Copyright IBM Corporation 2017, 2018, 2019 # U.S. Government Users Restricted Rights: Use, duplication or disclosure restricted # by GSA ADP Schedule Contract with IBM Corp. diff --git a/tests/preprocessing/test_reshaper.py b/tests/preprocessing/test_reshaper.py index 5f37dc6d..f3dcfd22 100644 --- a/tests/preprocessing/test_reshaper.py +++ b/tests/preprocessing/test_reshaper.py @@ -13,7 +13,6 @@ # limitations under the License. import os - # (C) Copyright IBM Corporation 2017, 2018, 2019 # U.S. Government Users Restricted Rights: Use, duplication or disclosure restricted # by GSA ADP Schedule Contract with IBM Corp. diff --git a/tests/preprocessing/test_sampler.py b/tests/preprocessing/test_sampler.py index baf3876d..0b57ce80 100644 --- a/tests/preprocessing/test_sampler.py +++ b/tests/preprocessing/test_sampler.py @@ -13,7 +13,6 @@ # limitations under the License. import os - # (C) Copyright IBM Corporation 2017, 2018, 2019 # U.S. Government Users Restricted Rights: Use, duplication or disclosure restricted # by GSA ADP Schedule Contract with IBM Corp. 
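
# The optuna-driven ESN workflow imported in the pool test above follows the
# standard objective/study pattern; a toy objective for orientation (the real
# ObjectiveESNIndependent samples reservoir hyperparameters instead):
import optuna

def objective(trial):
    x = trial.suggest_float("x", -4.0, 4.0)
    return (x - 1.0) ** 2

study = optuna.create_study(direction="minimize")
study.optimize(objective, n_trials=20)
print(study.best_params)
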
diff --git a/tests/preprocessing/test_tokenizer.py b/tests/preprocessing/test_tokenizer.py index 00bfcbb2..11890429 100644 --- a/tests/preprocessing/test_tokenizer.py +++ b/tests/preprocessing/test_tokenizer.py @@ -14,14 +14,14 @@ import random from unittest import TestCase + import numpy as np from simulai.io import Tokenizer -class TestTokenizer(TestCase): +class TestTokenizer(TestCase): def test_time_indexer(self): - n_samples = 100 n_series_input = 3 n_series_output = 2 @@ -33,14 +33,17 @@ def test_time_indexer(self): tokenizer = Tokenizer(kind="time_indexer") - input_dataset = tokenizer.generate_input_tokens(input_data, num_step=num_step, step=step) - target_dataset = tokenizer.generate_target_tokens(target_data, num_step=num_step) + input_dataset = tokenizer.generate_input_tokens( + input_data, num_step=num_step, step=step + ) + target_dataset = tokenizer.generate_target_tokens( + target_data, num_step=num_step + ) print(f"Input shape: {input_dataset.shape}") print(f"Target shape: {target_dataset.shape}") def test_time_example(self): - n_samples = 10 num_step = 5 step = 0.1 @@ -50,15 +53,23 @@ def test_time_example(self): tokenizer = Tokenizer(kind="time_indexer") - input_dataset = tokenizer.generate_input_tokens(input_data, num_step=num_step, step=step) - target_dataset = tokenizer.generate_target_tokens(target_data, num_step=num_step) - - assert input_dataset.shape[1:] == (num_step, 2), "The input dataset has not the proper shape" - assert target_dataset.shape[1:] == (num_step, 1), "The target dataset has not the proper shape" - + input_dataset = tokenizer.generate_input_tokens( + input_data, num_step=num_step, step=step + ) + target_dataset = tokenizer.generate_target_tokens( + target_data, num_step=num_step + ) + + assert input_dataset.shape[1:] == ( + num_step, + 2, + ), "The input dataset has not the proper shape" + assert target_dataset.shape[1:] == ( + num_step, + 1, + ), "The target dataset has not the proper shape" def test_time_deeponet_example(self): - n_samples = 10 num_step = 5 step = 0.1 @@ -68,10 +79,19 @@ def test_time_deeponet_example(self): tokenizer = Tokenizer(kind="time_deeponet_indexer") - input_branch_dataset, input_trunk_dataset = tokenizer.generate_input_tokens(input_data, num_step=num_step, step=step) - target_dataset = tokenizer.generate_target_tokens(target_data, num_step=num_step) - - assert input_branch_dataset.shape[1:] == (1,), "The input branch dataset has not the proper shape" - assert input_trunk_dataset.shape[1:] == (1,), "The input trunk dataset has not the proper shape" - assert target_dataset.shape[1:] == (1,), "The target dataset has not the proper shape" - + input_branch_dataset, input_trunk_dataset = tokenizer.generate_input_tokens( + input_data, num_step=num_step, step=step + ) + target_dataset = tokenizer.generate_target_tokens( + target_data, num_step=num_step + ) + + assert input_branch_dataset.shape[1:] == ( + 1, + ), "The input branch dataset has not the proper shape" + assert input_trunk_dataset.shape[1:] == ( + 1, + ), "The input trunk dataset has not the proper shape" + assert target_dataset.shape[1:] == ( + 1, + ), "The target dataset has not the proper shape" diff --git a/tests/residuals/test_symbolicoperator.py b/tests/residuals/test_symbolicoperator.py index 0d46e05e..2df1f408 100644 --- a/tests/residuals/test_symbolicoperator.py +++ b/tests/residuals/test_symbolicoperator.py @@ -16,6 +16,7 @@ from unittest import TestCase import numpy as np + from tests.config import configure_dtype torch = configure_dtype() diff --git 
a/tests/rom/test_gappy_pca_decomposition.py b/tests/rom/test_gappy_pca_decomposition.py index 64c97dcb..213c0bb8 100644 --- a/tests/rom/test_gappy_pca_decomposition.py +++ b/tests/rom/test_gappy_pca_decomposition.py @@ -18,7 +18,8 @@ from simulai.math.progression import gp from simulai.rom import GPOD -from simulai.special import Scattering, bidimensional_map_nonlin_3, time_function +from simulai.special import (Scattering, bidimensional_map_nonlin_3, + time_function) class TestPCADecomposition(TestCase): diff --git a/tests/rom/test_hosvd.py b/tests/rom/test_hosvd.py index 3df0bb13..83f8a97b 100644 --- a/tests/rom/test_hosvd.py +++ b/tests/rom/test_hosvd.py @@ -19,7 +19,8 @@ from simulai.file import load_pkl from simulai.metrics import L2Norm from simulai.rom import HOSVD -from simulai.special import Scattering, bidimensional_map_nonlin_3, time_function +from simulai.special import (Scattering, bidimensional_map_nonlin_3, + time_function) class TestHOSVDDecomposition(TestCase): diff --git a/tests/rom/test_ipca_datapreparer.py b/tests/rom/test_ipca_datapreparer.py index 2954abee..5d3873f7 100644 --- a/tests/rom/test_ipca_datapreparer.py +++ b/tests/rom/test_ipca_datapreparer.py @@ -24,7 +24,8 @@ from simulai.metrics import L2Norm, MeanEvaluation from simulai.rom import IPOD from simulai.simulation import Pipeline -from simulai.special import Scattering, bidimensional_map_nonlin_3, time_function +from simulai.special import (Scattering, bidimensional_map_nonlin_3, + time_function) from simulai.utilities import make_temp_directory diff --git a/tests/rom/test_ipca_decomposition.py b/tests/rom/test_ipca_decomposition.py index 92dfef25..69f940e4 100644 --- a/tests/rom/test_ipca_decomposition.py +++ b/tests/rom/test_ipca_decomposition.py @@ -24,7 +24,8 @@ from simulai.metrics import L2Norm from simulai.rom import IPOD from simulai.simulation import Pipeline -from simulai.special import Scattering, bidimensional_map_nonlin_3, time_function +from simulai.special import (Scattering, bidimensional_map_nonlin_3, + time_function) from simulai.utilities import make_temp_directory diff --git a/tests/rom/test_pca_decomposition.py b/tests/rom/test_pca_decomposition.py index 11764889..b61c177c 100644 --- a/tests/rom/test_pca_decomposition.py +++ b/tests/rom/test_pca_decomposition.py @@ -19,7 +19,8 @@ from simulai.math.filtering import SVDThreshold from simulai.math.progression import gp from simulai.rom import POD -from simulai.special import Scattering, bidimensional_map_nonlin_3, time_function +from simulai.special import (Scattering, bidimensional_map_nonlin_3, + time_function) class TestPCADecomposition(TestCase):
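
# The Tokenizer API exercised in the preprocessing tests above, condensed into
# a standalone snippet (array sizes are illustrative):
import numpy as np
from simulai.io import Tokenizer

input_data = np.random.rand(100, 3)
target_data = np.random.rand(100, 2)

tokenizer = Tokenizer(kind="time_indexer")
input_tokens = tokenizer.generate_input_tokens(input_data, num_step=5, step=0.1)
target_tokens = tokenizer.generate_target_tokens(target_data, num_step=5)
print(input_tokens.shape, target_tokens.shape)
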