From 670776e88bee184c1792b2ad5572fdb85b4fc60d Mon Sep 17 00:00:00 2001
From: João Lucas de Sousa Almeida
Date: Mon, 13 Nov 2023 11:14:58 -0300
Subject: [PATCH] Deployed c701a06 with MkDocs version: 1.5.3

---
 index.html                                         |   21 +-
 objects.inv                                        |  Bin 2879 -> 2982 bytes
 search.html                                        |   21 +-
 search/search_index.json                           |    2 +-
 simulai_activations/index.html                     |   21 +-
 simulai_batching/index.html                        |   21 +-
 simulai_file/index.html                            |   21 +-
 simulai_io/index.html                              |   21 +-
 .../simulai_models_autoencoder/index.html          |   21 +-
 .../simulai_models_deeponet/index.html             |   21 +-
 .../simulai_models_transformer/index.html          |   21 +-
 simulai_models/simulai_models_unet/index.html      |   21 +-
 .../simulai_losses/index.html                      | 3247 +++++++++++++++++
 .../simulai_optimizer/index.html                   |   31 +-
 simulai_parallel/index.html                        |   21 +-
 simulai_regression/simulai_dense/index.html        |   21 +-
 simulai_regression/simulai_opinf/index.html        |   21 +-
 simulai_residuals/index.html                       |   21 +-
 simulai_rom/simulai_rom/index.html                 |   21 +-
 sitemap.xml.gz                                     |  Bin 127 -> 127 bytes
 20 files changed, 3573 insertions(+), 22 deletions(-)
 create mode 100644 simulai_optimization/simulai_losses/index.html

diff --git a/index.html b/index.html
index 2223e46..6e4e294 100644
--- a/index.html
+++ b/index.html
@@ -244,7 +244,26 @@

diff --git a/objects.inv b/objects.inv
index 684921f02fad529a3e007757d7d05d57ef35124f..7f81bff159b09bd7b368fbb295872d1b0364ed83 100644
GIT binary patch
delta 2872
[binary patch payload omitted]

diff --git a/search.html b/search.html
index fa04535..771e52a 100644
--- a/search.html
+++ b/search.html
@@ -244,7 +244,26 @@

diff --git a/search/search_index.json b/search/search_index.json
index 83d004b..712df64 100644
--- a/search/search_index.json
+++ b/search/search_index.json
@@ -1 +1 @@
-{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"red { color: red } Welcome to SIMULai # An extensible Python package with data-driven pipelines for physics-informed machine learning. The SimulAI toolkit provides easy access to state-of-the-art models and algorithms for physics-informed machine learning.
Currently, it includes the following methods described in the literature: Physics-Informed Neural Networks (PINNs) Deep Operator Networks (DeepONets) Variational Encoder-Decoders (VED) Operator Inference (OpInf) Koopman Autoencoders (experimental) Echo State Networks (experimental GPU support) Transformers U-Nets In addition to the methods above, many more techniques for model reduction and regularization are included in SimulAI. See documentation . Installing # Python version requirements: 3.8 <= python <= 3.11 Using pip # For installing the most recent stable version from PyPI: pip install simulai-toolkit For installing from the latest commit sent to GitHub (just for testing and developing purposes): pip uninstall simulai_toolkit pip install -U git+https://github.com/IBM/simulai@$(git ls-remote git@github.com:IBM/simulai.git | head -1 | awk '{print $1;}')#egg=simulai_toolkit Contributing code to SimulAI # If you are interested in directly contributing to this project, please see CONTRIBUTING . Using MPI # Some methods implemented on SimulAI support multiprocessing with MPI. In order to use it, you will need a valid MPI distribution, e.g. MPICH, OpenMPI. As an example, you can use conda to install MPICH as follows: conda install -c conda-forge mpich gcc Issues with macOS # If you have problems installing gcc using the command above, we recommend you to install it using Homebrew . Using Tensorboard # Tensorboard is supported for monitoring neural network training tasks. For a tutorial about how to set it see this example . Documentation # Please, refer to the SimulAI API documentation before using the toolkit. Examples # Additionally, you can refer to examples in the respective folder . License # This software is licensed under Apache license 2.0. See LICENSE . Contributing code to SimulAI # If you are interested in directly contributing to this project, please see CONTRIBUTING . How to cite SimulAI in your publications # If you find SimulAI to be useful, please consider citing it in your published work: @misc{simulai, author = {IBM}, title = {SimulAI Toolkit}, subtitle = {A Python package with data-driven pipelines for physics-informed machine learning}, note = \"https://github.com/IBM/simulai\", doi = {10.5281/zenodo.7351516}, year = {2022}, } or, via Zenodo: @software{joao_lucas_de_sousa_almeida_2023_7566603, author = {Jo\u00e3o Lucas de Sousa Almeida and Leonardo Martins and Tar\u0131k Kaan Ko\u00e7}, title = {IBM/simulai: 0.99.13}, month = jan, year = 2023, publisher = {Zenodo}, version = {0.99.25}, doi = {10.5281/zenodo.7566603}, url = {https://doi.org/10.5281/zenodo.7566603} } Publications # Jo\u00e3o Lucas de Sousa Almeida, Pedro Roberto Barbosa Rocha, Allan Moreira de Carvalho and Alberto Costa Nogueira Jr. A coupled Variational Encoder-Decoder - DeepONet surrogate model for the Rayleigh-B\u00e9nard convection problem. In When Machine Learning meets Dynamical Systems: Theory and Applications, AAAI, 2023. Jo\u00e3o Lucas S. Almeida, Arthur C. Pires, Klaus F. V. Cid, and Alberto C. Nogueira Jr. Non-intrusive operator inference for chaotic systems. IEEE Transactions on Artificial Intelligence, pages 1--14, 2022. Pedro Roberto Barbosa Rocha, Marcos Sebasti\u00e3o de Paula Gomes, Allan Moreira de Carvalho, Jo\u00e3o Lucas de Sousa Almeida and Alberto Costa Nogueira Jr. Data-driven reduced-order model for atmospheric CO2 dispersion. In AAAI 2022 Fall Symposium: The Role of AI in Responding to Climate Challenges, 2022. 
Pedro Roberto Barbosa Rocha, Jo\u00e3o Lucas de Sousa Almeida, Marcos Sebasti\u00e3o de Paula Gomes, Alberto Costa Nogueira, Reduced-order modeling of the two-dimensional Rayleigh--B\u00e9nard convection flow through a non-intrusive operator inference, Engineering Applications of Artificial Intelligence, Volume 126, Part B, 2023, 106923, ISSN 0952-1976, https://doi.org/10.1016/j.engappai.2023.106923 . ( https://www.sciencedirect.com/science/article/pii/S0952197623011077 ) References # Jaeger, H., Haas, H. (2004). \\\"Harnessing Nonlinearity: Predicting Chaotic Systems and Saving Energy in Wireless Communication,\\\" Science , 304 (5667): 78--80. \\< https://doi.org/10.1126/science.1091277 >`_. Lu, L., Jin, P., Pang, G., Zhang, Z., Karniadakis, G. E. (2021). \\\"Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators,\\\" Nature Machine Intelligence , 3 (1): 218--229. ISSN: 2522-5839. \\< https://doi.org/10.1038/s42256-021-00302-5 >`_. Eivazi, H., Le Clainche, S., Hoyas, S., Vinuesa, R. (2022) \\\"Towards extraction of orthogonal and parsimonious non-linear modes from turbulent flows\\\" Expert Systems with Applications , 202 . ISSN: 0957-4174. \\< https://doi.org/10.1016/j.eswa.2022.117038 >`_. Raissi, M., Perdikaris, P., Karniadakis, G. E. (2019). \\\"Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations,\\\" Journal of Computational Physics , 378 (1): 686-707. ISSN: 0021-9991. \\< https://doi.org/10.1016/j.jcp.2018.10.045 >`_. Lusch, B., Kutz, J. N., Brunton, S.L. (2018). \\\"Deep learning for universal linear embeddings of nonlinear dynamics,\\\" Nature Communications , 9 : 4950. ISSN: 2041-1723. \\< https://doi.org/10.1038/s41467-018-07210-0 >`_. McQuarrie, S., Huang, C. and Willcox, K. (2021). \\\"Data-driven reduced-order models via regularized operator inference for a single-injector combustion process,\\\" Journal of the Royal Society of New Zealand , 51 (2): 194-211. ISSN: 0303-6758. \\< https://doi.org/10.1080/03036758.2020.1863237 >`_.","title":"Home"},{"location":"#welcome-to-simulai","text":"An extensible Python package with data-driven pipelines for physics-informed machine learning. The SimulAI toolkit provides easy access to state-of-the-art models and algorithms for physics-informed machine learning. Currently, it includes the following methods described in the literature: Physics-Informed Neural Networks (PINNs) Deep Operator Networks (DeepONets) Variational Encoder-Decoders (VED) Operator Inference (OpInf) Koopman Autoencoders (experimental) Echo State Networks (experimental GPU support) Transformers U-Nets In addition to the methods above, many more techniques for model reduction and regularization are included in SimulAI. 
See documentation .","title":"Welcome to SIMULai"},{"location":"#installing","text":"Python version requirements: 3.8 <= python <= 3.11","title":"Installing"},{"location":"#using-pip","text":"For installing the most recent stable version from PyPI: pip install simulai-toolkit For installing from the latest commit sent to GitHub (just for testing and developing purposes): pip uninstall simulai_toolkit pip install -U git+https://github.com/IBM/simulai@$(git ls-remote git@github.com:IBM/simulai.git | head -1 | awk '{print $1;}')#egg=simulai_toolkit","title":"Using pip"},{"location":"#contributing-code-to-simulai","text":"If you are interested in directly contributing to this project, please see CONTRIBUTING .","title":"Contributing code to SimulAI"},{"location":"#using-mpi","text":"Some methods implemented on SimulAI support multiprocessing with MPI. In order to use it, you will need a valid MPI distribution, e.g. MPICH, OpenMPI. As an example, you can use conda to install MPICH as follows: conda install -c conda-forge mpich gcc","title":"Using MPI"},{"location":"#issues-with-macos","text":"If you have problems installing gcc using the command above, we recommend you to install it using Homebrew .","title":"Issues with macOS"},{"location":"#using-tensorboard","text":"Tensorboard is supported for monitoring neural network training tasks. For a tutorial about how to set it see this example .","title":"Using Tensorboard"},{"location":"#documentation","text":"Please, refer to the SimulAI API documentation before using the toolkit.","title":"Documentation"},{"location":"#examples","text":"Additionally, you can refer to examples in the respective folder .","title":"Examples"},{"location":"#license","text":"This software is licensed under Apache license 2.0. See LICENSE .","title":"License"},{"location":"#contributing-code-to-simulai_1","text":"If you are interested in directly contributing to this project, please see CONTRIBUTING .","title":"Contributing code to SimulAI"},{"location":"#how-to-cite-simulai-in-your-publications","text":"If you find SimulAI to be useful, please consider citing it in your published work: @misc{simulai, author = {IBM}, title = {SimulAI Toolkit}, subtitle = {A Python package with data-driven pipelines for physics-informed machine learning}, note = \"https://github.com/IBM/simulai\", doi = {10.5281/zenodo.7351516}, year = {2022}, } or, via Zenodo: @software{joao_lucas_de_sousa_almeida_2023_7566603, author = {Jo\u00e3o Lucas de Sousa Almeida and Leonardo Martins and Tar\u0131k Kaan Ko\u00e7}, title = {IBM/simulai: 0.99.13}, month = jan, year = 2023, publisher = {Zenodo}, version = {0.99.25}, doi = {10.5281/zenodo.7566603}, url = {https://doi.org/10.5281/zenodo.7566603} }","title":"How to cite SimulAI in your publications"},{"location":"#publications","text":"Jo\u00e3o Lucas de Sousa Almeida, Pedro Roberto Barbosa Rocha, Allan Moreira de Carvalho and Alberto Costa Nogueira Jr. A coupled Variational Encoder-Decoder - DeepONet surrogate model for the Rayleigh-B\u00e9nard convection problem. In When Machine Learning meets Dynamical Systems: Theory and Applications, AAAI, 2023. Jo\u00e3o Lucas S. Almeida, Arthur C. Pires, Klaus F. V. Cid, and Alberto C. Nogueira Jr. Non-intrusive operator inference for chaotic systems. IEEE Transactions on Artificial Intelligence, pages 1--14, 2022. Pedro Roberto Barbosa Rocha, Marcos Sebasti\u00e3o de Paula Gomes, Allan Moreira de Carvalho, Jo\u00e3o Lucas de Sousa Almeida and Alberto Costa Nogueira Jr. 
Data-driven reduced-order model for atmospheric CO2 dispersion. In AAAI 2022 Fall Symposium: The Role of AI in Responding to Climate Challenges, 2022. Pedro Roberto Barbosa Rocha, Jo\u00e3o Lucas de Sousa Almeida, Marcos Sebasti\u00e3o de Paula Gomes, Alberto Costa Nogueira, Reduced-order modeling of the two-dimensional Rayleigh--B\u00e9nard convection flow through a non-intrusive operator inference, Engineering Applications of Artificial Intelligence, Volume 126, Part B, 2023, 106923, ISSN 0952-1976, https://doi.org/10.1016/j.engappai.2023.106923 . ( https://www.sciencedirect.com/science/article/pii/S0952197623011077 )","title":"Publications"},{"location":"#references","text":"Jaeger, H., Haas, H. (2004). \\\"Harnessing Nonlinearity: Predicting Chaotic Systems and Saving Energy in Wireless Communication,\\\" Science , 304 (5667): 78--80. \\< https://doi.org/10.1126/science.1091277 >`_. Lu, L., Jin, P., Pang, G., Zhang, Z., Karniadakis, G. E. (2021). \\\"Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators,\\\" Nature Machine Intelligence , 3 (1): 218--229. ISSN: 2522-5839. \\< https://doi.org/10.1038/s42256-021-00302-5 >`_. Eivazi, H., Le Clainche, S., Hoyas, S., Vinuesa, R. (2022) \\\"Towards extraction of orthogonal and parsimonious non-linear modes from turbulent flows\\\" Expert Systems with Applications , 202 . ISSN: 0957-4174. \\< https://doi.org/10.1016/j.eswa.2022.117038 >`_. Raissi, M., Perdikaris, P., Karniadakis, G. E. (2019). \\\"Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations,\\\" Journal of Computational Physics , 378 (1): 686-707. ISSN: 0021-9991. \\< https://doi.org/10.1016/j.jcp.2018.10.045 >`_. Lusch, B., Kutz, J. N., Brunton, S.L. (2018). \\\"Deep learning for universal linear embeddings of nonlinear dynamics,\\\" Nature Communications , 9 : 4950. ISSN: 2041-1723. \\< https://doi.org/10.1038/s41467-018-07210-0 >`_. McQuarrie, S., Huang, C. and Willcox, K. (2021). \\\"Data-driven reduced-order models via regularized operator inference for a single-injector combustion process,\\\" Journal of the Royal Society of New Zealand , 51 (2): 194-211. ISSN: 0303-6758. \\< https://doi.org/10.1080/03036758.2020.1863237 >`_.","title":"References"},{"location":"simulai_activations/","text":"red { color: red } Activations # Siren # Bases: Module Sinusoidal Representation Networks (SIREN) Source code in simulai/activations.py 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 class Siren ( torch . nn . Module ): \"\"\"Sinusoidal Representation Networks (SIREN)\"\"\" name = \"Siren\" def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . c = c @property def share_to_host ( self ) -> dict : \"\"\"Return the parameters of the SIREN model. Returns: dict: A dictionary containing the parameters 'omega_0' and 'c'. \"\"\" return { \"omega_0\" : self . omega_0 , \"c\" : self . c } def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. 
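As a minimal usage sketch (not part of the library listing above, and with arbitrary parameter values), the Siren class behaves like any other torch.nn.Module:

import torch
from simulai.activations import Siren

# Siren applies sin(omega_0 * x) element-wise; omega_0 sets the frequency scale.
siren = Siren(omega_0=30.0, c=6.0)
x = torch.linspace(-1.0, 1.0, steps=8).reshape(-1, 1)
y = siren(x)    # same shape as x, values bounded in [-1, 1]
print(y.shape)  # torch.Size([8, 1])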
\"\"\" return torch . sin ( self . omega_0 * input ) share_to_host : dict property # Return the parameters of the SIREN model. Returns: Name Type Description dict dict A dictionary containing the parameters 'omega_0' and 'c'. __init__ ( omega_0 = None , c = None ) # Initialize SIREN model with given parameters. Parameters: Name Type Description Default omega_0 float (Default value = None) None c float (Default value = None) None Source code in simulai/activations.py 23 24 25 26 27 28 29 30 31 32 33 34 def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . c = c forward ( input ) # Perform the forward pass of the SIREN model on the input. Parameters: Name Type Description Default input Tensor The input to the SIREN model. required Returns: Type Description Tensor torch.Tensor: The output of the SIREN model. Source code in simulai/activations.py 47 48 49 50 51 52 53 54 55 56 57 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. \"\"\" return torch . sin ( self . omega_0 * input ) sin # Bases: Module Sine activation function. This module applies the sine function element-wise to the input. Source code in simulai/activations.py 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 class sin ( torch . nn . Module ): \"\"\"Sine activation function. This module applies the sine function element-wise to the input. \"\"\" name = \"sin\" def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . __init__ () def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input ) __init__ () # Initialize the sine activation function. Source code in simulai/activations.py 69 70 71 72 73 74 def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . __init__ () forward ( input ) # Perform the forward pass of the sine activation function on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 76 77 78 79 80 81 82 83 84 85 86 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input ) Wavelet # Bases: Module Wavelet activation Source code in simulai/activations.py 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 class Wavelet ( torch . nn . Module ): \"\"\"Wavelet activation\"\"\" name = \"wavelet\" def __init__ ( self ) -> None : super ( Wavelet , self ) . __init__ () self . w1 = torch . nn . Parameter ( torch . 
ones ( 1 ), requires_grad = True ) self . w2 = torch . nn . Parameter ( torch . ones ( 1 ), requires_grad = True ) def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . cos ( input ) forward ( input ) # Perform the forward pass of the Wavelet activation on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 100 101 102 103 104 105 106 107 108 109 110 111 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . cos ( input )","title":"Simulai activations"},{"location":"simulai_activations/#activations","text":"","title":"Activations"},{"location":"simulai_activations/#siren","text":"Bases: Module Sinusoidal Representation Networks (SIREN) Source code in simulai/activations.py 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 class Siren ( torch . nn . Module ): \"\"\"Sinusoidal Representation Networks (SIREN)\"\"\" name = \"Siren\" def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . c = c @property def share_to_host ( self ) -> dict : \"\"\"Return the parameters of the SIREN model. Returns: dict: A dictionary containing the parameters 'omega_0' and 'c'. \"\"\" return { \"omega_0\" : self . omega_0 , \"c\" : self . c } def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. \"\"\" return torch . sin ( self . omega_0 * input )","title":"Siren"},{"location":"simulai_activations/#simulai.activations.Siren.share_to_host","text":"Return the parameters of the SIREN model. Returns: Name Type Description dict dict A dictionary containing the parameters 'omega_0' and 'c'.","title":"share_to_host"},{"location":"simulai_activations/#simulai.activations.Siren.__init__","text":"Initialize SIREN model with given parameters. Parameters: Name Type Description Default omega_0 float (Default value = None) None c float (Default value = None) None Source code in simulai/activations.py 23 24 25 26 27 28 29 30 31 32 33 34 def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . 
c = c","title":"__init__()"},{"location":"simulai_activations/#simulai.activations.Siren.forward","text":"Perform the forward pass of the SIREN model on the input. Parameters: Name Type Description Default input Tensor The input to the SIREN model. required Returns: Type Description Tensor torch.Tensor: The output of the SIREN model. Source code in simulai/activations.py 47 48 49 50 51 52 53 54 55 56 57 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. \"\"\" return torch . sin ( self . omega_0 * input )","title":"forward()"},{"location":"simulai_activations/#sin","text":"Bases: Module Sine activation function. This module applies the sine function element-wise to the input. Source code in simulai/activations.py 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 class sin ( torch . nn . Module ): \"\"\"Sine activation function. This module applies the sine function element-wise to the input. \"\"\" name = \"sin\" def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . __init__ () def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input )","title":"sin"},{"location":"simulai_activations/#simulai.activations.sin.__init__","text":"Initialize the sine activation function. Source code in simulai/activations.py 69 70 71 72 73 74 def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . __init__ ()","title":"__init__()"},{"location":"simulai_activations/#simulai.activations.sin.forward","text":"Perform the forward pass of the sine activation function on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 76 77 78 79 80 81 82 83 84 85 86 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input )","title":"forward()"},{"location":"simulai_activations/#wavelet","text":"Bases: Module Wavelet activation Source code in simulai/activations.py 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 class Wavelet ( torch . nn . Module ): \"\"\"Wavelet activation\"\"\" name = \"wavelet\" def __init__ ( self ) -> None : super ( Wavelet , self ) . __init__ () self . w1 = torch . nn . Parameter ( torch . ones ( 1 ), requires_grad = True ) self . w2 = torch . nn . Parameter ( torch . ones ( 1 ), requires_grad = True ) def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . 
cos ( input )","title":"Wavelet"},{"location":"simulai_activations/#simulai.activations.Wavelet.forward","text":"Perform the forward pass of the Wavelet activation on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 100 101 102 103 104 105 106 107 108 109 110 111 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . cos ( input )","title":"forward()"},{"location":"simulai_batching/","text":"red { color: red } Batching operations # BatchwiseSampler # Source code in simulai/batching.py 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 class BatchwiseSampler : def __init__ ( self , dataset : h5py . Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . 
exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . _normalization_bypass # Evaluating the global minimum and maximum for all the # datasets in self.dataset def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list ) def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape ) def _normalization_bypass ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Bypass the normalization. Args: data (np.ndarray, optional): The data to be bypassed. (Default value = None) Returns: Same data: \"\"\" return data def _target_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the target data using the provided normalizer. Args: data (np.ndarray, optional): The target data to be normalized. (Default value = None) Returns: Normalized target data.: \"\"\" return self . target_normalizer ( data = data ) def _input_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the input data using the provided normalizer. Args: data (np.ndarray, optional): The input data to be normalized. (Default value = None) Returns: Normalized input data.: \"\"\" return self . input_normalizer ( data = data ) def _transpose_first_channel ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Transpose the first channel of the variables list. Args: variables_list (list, optional): (Default value = None) \"\"\" batch = np . stack ( variables_list , axis =- 1 ) dims = list ( range ( len ( batch . shape ))) dims_t = [ 0 ] + [ dims [ - 1 ]] + dims [ 1 : - 1 ] batch = batch . transpose ( * dims_t ) return torch . from_numpy ( batch . astype ( \"float32\" )) def _simple_stack ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Stack the variables list along the last axis. Args: variables_list (list, optional): The list of variables to be stacked. (Default value = None) Returns: A torch tensor of stacked variables.: \"\"\" batch = np . stack ( variables_list , dim =- 1 ) return torch . from_numpy ( batch . astype ( \"float32\" )) def input_data ( self , indices : np . ndarray = None ) -> torch . 
Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) def target_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) __init__ ( dataset = None , input_variables = None , target_variables = None , input_normalizer = None , target_normalizer = None , channels_first = None ) # Batchwise sampler for loading samples from disk and apply normalization if needed. Parameters: Name Type Description Default dataset Group Dataset object containing the samples (Default value = None) None input_variables List [ str ] List of input variables to be loaded (Default value = None) None target_variables List [ str ] List of target variables to be loaded (Default value = None) None input_normalizer callable Function to be applied on the input variables (Default value = None) None target_normalizer callable Function to be applied on the target variables (Default value = None) None channels_first bool Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) None Source code in simulai/batching.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 def __init__ ( self , dataset : h5py . Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . 
channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . _normalization_bypass input_data ( indices = None ) # Retrieve the input data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the input data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of input data: Source code in simulai/batching.py 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def input_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) input_shape () # Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: Type Description list A list of integers representing the shape of the input variables.: Source code in simulai/batching.py 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape ) minmax ( batch_size = None , data_interval = None ) # Evaluate the minimum and maximum values of all the target variables in the dataset. Parameters: Name Type Description Default batch_size int Number of samples to use in the evaluation (Default value = None) None data_interval list List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) None Returns: Type Description Tuple [ float , float ] A tuple of minimum and maximum value of the target variables.: Source code in simulai/batching.py 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. 
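A minimal usage sketch (not part of the library documentation): constructing a BatchwiseSampler over a hypothetical HDF5 group and evaluating the target min/max as described above. The file name, group key, variable names and interval below are assumptions for illustration only:

import h5py
from simulai.batching import BatchwiseSampler

fp = h5py.File("dataset.h5", "r")          # hypothetical HDF5 file
sampler = BatchwiseSampler(
    dataset=fp["data"],                    # h5py.Group with one dataset per variable
    input_variables=["U"],                 # names of the input datasets
    target_variables=["V"],                # names of the target datasets
    channels_first=True,                   # channels dimension placed right after the samples axis
)
print(sampler.input_shape())               # e.g. [n_samples, 1, *spatial_dims]
vmin, vmax = sampler.minmax(batch_size=100, data_interval=[0, 1000])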
Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list ) target_data ( indices = None ) # Retrieve the target data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the target data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of target data: Source code in simulai/batching.py 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 def target_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) batchdomain_constructor # Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Parameters: Name Type Description Default data_interval list A list of two integers representing the start and end of the data interval. (Default value = None) None batch_size int The desired size of the batches (Default value = None) None batch_indices list A list of indices to be divided into batches. (Default value = None) None Returns: Type Description list A list of lists containing the indices of the input data in the form of batches.: Source code in simulai/batching.py 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 def batchdomain_constructor ( data_interval : list = None , batch_size : int = None , batch_indices : list = None ) -> list : \"\"\"Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Args: data_interval (list, optional): A list of two integers representing the start and end of the data interval. (Default value = None) batch_size (int, optional): The desired size of the batches (Default value = None) batch_indices (list, optional): A list of indices to be divided into batches. 
(Default value = None) Returns: A list of lists containing the indices of the input data in the form of batches.: \"\"\" if data_interval is not None : interval_size = data_interval [ 1 ] - data_interval [ 0 ] interval = data_interval elif batch_indices is not None : interval_size = len ( batch_indices ) interval = [ batch_indices [ 0 ], batch_indices [ - 1 ]] else : raise Exception ( \"Either data_interval or batch_indices must be provided.\" ) if data_interval is not None : if interval_size < batch_size : batches_ = [ interval [ 0 ], interval [ 1 ]] batches_ = np . array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) residual = interval_size % batch_size batch_size_plus = floor ( residual / n_batches ) batch_size_plus_residual = residual % n_batches batch_size_up = batch_size + batch_size_plus batches_ = ( [ interval [ 0 ]] + [ batch_size_up + 1 ] * batch_size_plus_residual + [ batch_size_up ] * ( n_batches - batch_size_plus_residual ) ) batches_ = np . cumsum ( batches_ ) batches = [ batches_ [ i : i + 2 ] for i in range ( batches_ . shape [ 0 ] - 1 )] else : if interval_size < batch_size : batches_ = batch_indices batches_ = np . array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) batches_ = np . array_split ( batch_indices , n_batches , axis = 0 ) batches = [ item . tolist () for item in batches_ ] return batches","title":"Simulai batching"},{"location":"simulai_batching/#batching-operations","text":"","title":"Batching operations"},{"location":"simulai_batching/#batchwisesampler","text":"Source code in simulai/batching.py 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 class BatchwiseSampler : def __init__ ( self , dataset : h5py . Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. 
Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . _normalization_bypass # Evaluating the global minimum and maximum for all the # datasets in self.dataset def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list ) def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape ) def _normalization_bypass ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Bypass the normalization. Args: data (np.ndarray, optional): The data to be bypassed. (Default value = None) Returns: Same data: \"\"\" return data def _target_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the target data using the provided normalizer. Args: data (np.ndarray, optional): The target data to be normalized. 
(Default value = None) Returns: Normalized target data.: \"\"\" return self . target_normalizer ( data = data ) def _input_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the input data using the provided normalizer. Args: data (np.ndarray, optional): The input data to be normalized. (Default value = None) Returns: Normalized input data.: \"\"\" return self . input_normalizer ( data = data ) def _transpose_first_channel ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Transpose the first channel of the variables list. Args: variables_list (list, optional): (Default value = None) \"\"\" batch = np . stack ( variables_list , axis =- 1 ) dims = list ( range ( len ( batch . shape ))) dims_t = [ 0 ] + [ dims [ - 1 ]] + dims [ 1 : - 1 ] batch = batch . transpose ( * dims_t ) return torch . from_numpy ( batch . astype ( \"float32\" )) def _simple_stack ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Stack the variables list along the last axis. Args: variables_list (list, optional): The list of variables to be stacked. (Default value = None) Returns: A torch tensor of stacked variables.: \"\"\" batch = np . stack ( variables_list , dim =- 1 ) return torch . from_numpy ( batch . astype ( \"float32\" )) def input_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) def target_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) )","title":"BatchwiseSampler"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.__init__","text":"Batchwise sampler for loading samples from disk and apply normalization if needed. Parameters: Name Type Description Default dataset Group Dataset object containing the samples (Default value = None) None input_variables List [ str ] List of input variables to be loaded (Default value = None) None target_variables List [ str ] List of target variables to be loaded (Default value = None) None input_normalizer callable Function to be applied on the input variables (Default value = None) None target_normalizer callable Function to be applied on the target variables (Default value = None) None channels_first bool Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) None Source code in simulai/batching.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 def __init__ ( self , dataset : h5py . 
Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . _normalization_bypass","title":"__init__()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.input_data","text":"Retrieve the input data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the input data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of input data: Source code in simulai/batching.py 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def input_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) )","title":"input_data()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.input_shape","text":"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: Type Description list A list of integers representing the shape of the input variables.: Source code in simulai/batching.py 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. 
The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape )","title":"input_shape()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.minmax","text":"Evaluate the minimum and maximum values of all the target variables in the dataset. Parameters: Name Type Description Default batch_size int Number of samples to use in the evaluation (Default value = None) None data_interval list List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) None Returns: Type Description Tuple [ float , float ] A tuple of minimum and maximum value of the target variables.: Source code in simulai/batching.py 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list )","title":"minmax()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.target_data","text":"Retrieve the target data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the target data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of target data: Source code in simulai/batching.py 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 def target_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) )","title":"target_data()"},{"location":"simulai_batching/#batchdomain_constructor","text":"Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Parameters: Name Type Description Default data_interval list A list of two integers representing the start and end of the data interval. 
(Default value = None) None batch_size int The desired size of the batches (Default value = None) None batch_indices list A list of indices to be divided into batches. (Default value = None) None Returns: Type Description list A list of lists containing the indices of the input data in the form of batches.: Source code in simulai/batching.py 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 def batchdomain_constructor ( data_interval : list = None , batch_size : int = None , batch_indices : list = None ) -> list : \"\"\"Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Args: data_interval (list, optional): A list of two integers representing the start and end of the data interval. (Default value = None) batch_size (int, optional): The desired size of the batches (Default value = None) batch_indices (list, optional): A list of indices to be divided into batches. (Default value = None) Returns: A list of lists containing the indices of the input data in the form of batches.: \"\"\" if data_interval is not None : interval_size = data_interval [ 1 ] - data_interval [ 0 ] interval = data_interval elif batch_indices is not None : interval_size = len ( batch_indices ) interval = [ batch_indices [ 0 ], batch_indices [ - 1 ]] else : raise Exception ( \"Either data_interval or batch_indices must be provided.\" ) if data_interval is not None : if interval_size < batch_size : batches_ = [ interval [ 0 ], interval [ 1 ]] batches_ = np . array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) residual = interval_size % batch_size batch_size_plus = floor ( residual / n_batches ) batch_size_plus_residual = residual % n_batches batch_size_up = batch_size + batch_size_plus batches_ = ( [ interval [ 0 ]] + [ batch_size_up + 1 ] * batch_size_plus_residual + [ batch_size_up ] * ( n_batches - batch_size_plus_residual ) ) batches_ = np . cumsum ( batches_ ) batches = [ batches_ [ i : i + 2 ] for i in range ( batches_ . shape [ 0 ] - 1 )] else : if interval_size < batch_size : batches_ = batch_indices batches_ = np . array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) batches_ = np . array_split ( batch_indices , n_batches , axis = 0 ) batches = [ item . tolist () for item in batches_ ] return batches","title":"batchdomain_constructor"},{"location":"simulai_file/","text":"red { color: red } File IO # load_pkl # Load a pickle file into a Python object. Parameters: Name Type Description Default path str (Default value = None) None Returns: Type Description Union [ object , None] object or None: Raises: Type Description Exception if the provided path is not a file or cannot be opened Source code in simulai/file.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 def load_pkl ( path : str = None ) -> Union [ object , None ]: \"\"\"Load a pickle file into a Python object. 
Args: path (str, optional): (Default value = None) Returns: object or None: Raises: Exception: if the provided path is not a file or cannot be opened \"\"\" import pickle filename = os . path . basename ( path ) file_extension = filename . split ( \".\" )[ - 1 ] if file_extension == \"pkl\" : if os . path . isfile ( path ): try : with open ( path , \"rb\" ) as fp : model = pickle . load ( fp ) return model except : raise Exception ( f \"The file { path } could not be opened.\" ) else : raise Exception ( f \"The file { path } is not a file.\" ) else : raise Exception ( f \"The file format { file_extension } is not supported. It must be pickle.\" ) SPFile # Source code in simulai/file.py 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 class SPFile : def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . compact = compact def _leading_size ( self , first_line : str = None ) -> int : \"\"\"Returns the number of leading white spaces in the given line Args: first_line (str, optional): (Default value = None) Returns: int: number of leading white spaces. \"\"\" leading_whitespaces = len ( first_line ) - len ( first_line . lstrip ()) return leading_whitespaces def _process_code ( self , code : str = None ) -> str : \"\"\"Returns the code string with leading white spaces removed from each line Args: code (str, optional): The code string which to remove the leading whitespaces (Default value = None) Returns: str: The code string with leading white spaces removed. \"\"\" code_lines = code . split ( \" \\n \" ) first_line = code_lines [ 0 ] leading_size = self . _leading_size ( first_line = first_line ) code_lines_ = [ item [ leading_size :] for item in code_lines ] return \" \\n \" . join ( code_lines_ ) def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. (Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . 
getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device ) def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . load ( save_dir = save_dir , name = name , device = device ) return Model __init__ ( compact = False ) # Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Parameters: Name Type Description Default compact bool Compress the directory to a tar file or not. Default : False False Source code in simulai/file.py 65 66 67 68 69 70 71 72 73 74 75 76 def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . compact = compact read ( model_path = None , device = None , template_name = None ) # Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Parameters: Name Type Description Default model_path str Complete path to the model. (Default value = None) None device str Device to load the model onto. (Default value = None) None template_name str (Default value = None) None Returns: Name Type Description NetworkTemplate child of torch.nn.Module The model restored to memory. 
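A minimal round-trip sketch may make the intended pairing of write() and read() clearer. Everything below other than the SPFile, write() and read() signatures documented on this page is illustrative: the save directory, the model name and the model_maker template (including the DenseNetwork configuration it builds) are placeholders that may need adapting to the installed simulai version.

from simulai.file import SPFile

def model_maker(n_inputs: int = 2, n_outputs: int = 1):
    # The template must be a self-contained callable, because its source
    # code is copied verbatim into <name>_template.py by write().
    from simulai.regression import DenseNetwork

    # Hypothetical network configuration, shown only for illustration.
    config = {
        "layers_units": [32, 32],
        "activations": "tanh",
        "input_size": n_inputs,
        "output_size": n_outputs,
        "name": "net",
    }
    return DenseNetwork(**config)

saver = SPFile(compact=False)
model = model_maker()

# Persists the template source, the template arguments and the model weights.
saver.write(
    save_dir="/tmp/models",   # placeholder directory
    name="net",
    template=model_maker,
    args={"n_inputs": 2, "n_outputs": 1},
    model=model,
    device="cpu",
)

# Possibly in another process: restore the model from the written directory.
model_reloaded = SPFile().read(model_path="/tmp/models/net", device="cpu")

Note that, in the source shown here, write() persists the template arguments as <name>_args.pkl while read() looks for <name>_args.json, so giving the template sensible default arguments keeps the round trip robust even when the arguments file is not found.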
Source code in simulai/file.py 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . load ( save_dir = save_dir , name = name , device = device ) return Model write ( save_dir = None , name = None , template = None , args = None , model = None , device = None ) # Writes the model and its instantiating function to a directory. Parameters: Name Type Description Default save_dir str The absolute directory path to save the model (Default value = None) None name str A name for the model. (Default value = None) None template callable A function for instantiating the model. (Default value = None) None args dict Dictionary containing arguments to be passed to template. (Default value = None) None model NetworkTemplate The model to be saved. (Default value = None) None device str (Default value = None) None Source code in simulai/file.py 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. (Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . 
join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device )","title":"Simulai file"},{"location":"simulai_file/#file-io","text":"","title":"File IO"},{"location":"simulai_file/#load_pkl","text":"Load a pickle file into a Python object. Parameters: Name Type Description Default path str (Default value = None) None Returns: Type Description Union [ object , None] object or None: Raises: Type Description Exception if the provided path is not a file or cannot be opened Source code in simulai/file.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 def load_pkl ( path : str = None ) -> Union [ object , None ]: \"\"\"Load a pickle file into a Python object. Args: path (str, optional): (Default value = None) Returns: object or None: Raises: Exception: if the provided path is not a file or cannot be opened \"\"\" import pickle filename = os . path . basename ( path ) file_extension = filename . split ( \".\" )[ - 1 ] if file_extension == \"pkl\" : if os . path . isfile ( path ): try : with open ( path , \"rb\" ) as fp : model = pickle . load ( fp ) return model except : raise Exception ( f \"The file { path } could not be opened.\" ) else : raise Exception ( f \"The file { path } is not a file.\" ) else : raise Exception ( f \"The file format { file_extension } is not supported. It must be pickle.\" )","title":"load_pkl"},{"location":"simulai_file/#spfile","text":"Source code in simulai/file.py 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 class SPFile : def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . compact = compact def _leading_size ( self , first_line : str = None ) -> int : \"\"\"Returns the number of leading white spaces in the given line Args: first_line (str, optional): (Default value = None) Returns: int: number of leading white spaces. \"\"\" leading_whitespaces = len ( first_line ) - len ( first_line . 
lstrip ()) return leading_whitespaces def _process_code ( self , code : str = None ) -> str : \"\"\"Returns the code string with leading white spaces removed from each line Args: code (str, optional): The code string which to remove the leading whitespaces (Default value = None) Returns: str: The code string with leading white spaces removed. \"\"\" code_lines = code . split ( \" \\n \" ) first_line = code_lines [ 0 ] leading_size = self . _leading_size ( first_line = first_line ) code_lines_ = [ item [ leading_size :] for item in code_lines ] return \" \\n \" . join ( code_lines_ ) def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. (Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device ) def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . 
load ( save_dir = save_dir , name = name , device = device ) return Model","title":"SPFile"},{"location":"simulai_file/#simulai.file.SPFile.__init__","text":"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Parameters: Name Type Description Default compact bool Compress the directory to a tar file or not. Default : False False Source code in simulai/file.py 65 66 67 68 69 70 71 72 73 74 75 76 def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . compact = compact","title":"__init__()"},{"location":"simulai_file/#simulai.file.SPFile.read","text":"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Parameters: Name Type Description Default model_path str Complete path to the model. (Default value = None) None device str Device to load the model onto. (Default value = None) None template_name str (Default value = None) None Returns: Name Type Description NetworkTemplate child of torch.nn.Module The model restored to memory. Source code in simulai/file.py 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . load ( save_dir = save_dir , name = name , device = device ) return Model","title":"read()"},{"location":"simulai_file/#simulai.file.SPFile.write","text":"Writes the model and its instantiating function to a directory. 
Parameters: Name Type Description Default save_dir str The absolute directory path to save the model (Default value = None) None name str A name for the model. (Default value = None) None template callable A function for instantiating the model. (Default value = None) None args dict Dictionary containing arguments to be passed to template. (Default value = None) None model NetworkTemplate The model to be saved. (Default value = None) None device str (Default value = None) None Source code in simulai/file.py 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. (Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device )","title":"write()"},{"location":"simulai_io/","text":"red { color: red } simulai.io # ByPassPreparer # Bases: DataPreparer ByPass class, it fills the DataPreparer blank, but does nothing. Source code in simulai/io.py 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 class ByPassPreparer ( DataPreparer ): \"\"\"ByPass class, it fills the DataPreparer blank, but does nothing.\"\"\" name = \"no_preparer\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . collapsible_shapes = None self . dtype = None def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. 
Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data def prepare_output_structured_data ( self , data : np . ndarray ) -> np . recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', '>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) Source code in simulai/io.py 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data prepare_input_structured_data ( data ) # Prepare structured input data by converting it to an ndarray. Parameters: Name Type Description Default data recarray required Returns: Type Description ndarray np.ndarray: numpy ndarray version of the input data. Note This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) Source code in simulai/io.py 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. 
Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data prepare_output_data ( data ) # Prepare output data. Parameters: Name Type Description Default data ndarray required Returns: Type Description ndarray numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) Source code in simulai/io.py 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data prepare_output_structured_data ( data ) # Prepare structured output data by converting it to a recarray. Parameters: Name Type Description Default data ndarray required Returns: Type Description recarray np.recarray: numpy recarray version of the output data. Note This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' np . recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' (n0, prod(n1, ..., nm)) Source code in simulai/io.py 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class Reshaper ( DataPreparer ): \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm))\"\"\" name = \"reshaper\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . 
collapsible_shapes = None self . collapsed_shape = None self . dtype = None self . n_features = None def _set_shapes_from_data ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): The input data to reshape. (Default value = None) Example: >>> reshaper = Reshaper() >>> reshaper._set_shapes_from_data(np.random.random((10,3,4,5))) >>> reshaper.collapsible_shapes (3, 4, 5) \"\"\" self . collapsible_shapes = data . shape [ 1 :] self . collapsed_shape = np . prod ( self . collapsible_shapes ) . astype ( int ) self . _is_recarray = data . dtype . names is not None if self . _is_recarray : self . n_features = len ( data . dtype . names ) * self . collapsed_shape else : self . n_features = self . collapsed_shape def _prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function reshapes the input data to (n0, prod(n1, ..., nm)) shape. Example: >>> reshaper = Reshaper() >>> data = np.random.random((10,3,4,5)) >>> reshaper.prepare_input_data(data) array([[0.527, 0.936, ... , 0.812], [0.947, 0.865, ... , 0.947], ..., [0.865, 0.947, ... , 0.865], [0.947, 0.865, ... , 0.947]]) \"\"\" assert len ( data . shape ) > 1 , \"Error! data must have at least two dimensions\" return data . reshape (( data . shape [ 0 ], self . n_features )) def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data ) def _reshape_to_output ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Reshape the data to its original shape before reshaping. Args: data (np.ndarray): Returns: np.ndarray: Note: The original shape of the data is stored in `collapsible_shapes` attribute. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper._set_shapes_from_data(input_data) >>> reshaped_data = reshaper._reshape_to_output(input_data.flatten()) >>> reshaped_data.shape (2, 3, 4) \"\"\" return data . reshape (( data . shape [ 0 ],) + self . collapsible_shapes ) def _prepare_output_data ( self , data : np . ndarray = None , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray, optional): The input data to be prepared, by default None single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" if self . _is_recarray : return self . _prepare_output_structured_data ( data ) else : return self . _reshape_to_output ( data ) def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . 
ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . _prepare_output_data ( data ) def _prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" self . dtype = data . dtype self . _set_shapes_from_data ( data ) data_ = recfunctions . structured_to_unstructured ( data ) reshaped_data_ = self . _prepare_input_data ( data_ ) return reshaped_data_ def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data ) def _prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" data = data . reshape ( ( data . shape [ 0 ],) + self . collapsible_shapes + ( len ( self . dtype ),) ) output_data = recfunctions . unstructured_to_structured ( data , self . dtype ) output_data = self . _reshape_to_output ( output_data ) return output_data prepare_input_data ( data ) # Prepare input data for reshaping. Parameters: Name Type Description Default data Union [ ndarray , recarray ] required Returns: Type Description ndarray np.ndarray: Note If data is a structured numpy array, it will be passed to _prepare_input_structured_data function. If data is a plain numpy array, it will be passed to _prepare_input_data function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) Source code in simulai/io.py 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. 
Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data ) prepare_input_structured_data ( data = None ) # Prepare the input structured data to be in the shape and format expected by the model. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: The prepared input structured data Source code in simulai/io.py 297 298 299 300 301 302 303 304 305 306 307 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data ) prepare_output_data ( data , single = False ) # Prepare the input data to be in the shape and format expected by the model. Parameters: Name Type Description Default data ndarray The input data to be prepared required single bool (Default value = False) False Returns: Type Description ndarray np.ndarray: The prepared input data Source code in simulai/io.py 268 269 270 271 272 273 274 275 276 277 278 279 def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . _prepare_output_data ( data ) prepare_output_structured_data ( data = None ) # Prepare the output data to be in the shape and format expected by the user. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description recarray np.recarray: The prepared output structured data Source code in simulai/io.py 309 310 311 312 313 314 315 316 317 318 319 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data ) ScalerReshaper # Bases: Reshaper ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data. 
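Concretely, according to the source listed below, prepare_input_data applies (data - bias) / scale before delegating the flattening to the parent Reshaper, and prepare_output_data applies the inverse map data * scale + bias before restoring the original array shape. A short sketch of that round trip (the array shape and the bias/scale values are arbitrary):

import numpy as np
from simulai.io import ScalerReshaper

reshaper = ScalerReshaper(bias=10.0, scale=2.0)

data = np.random.rand(5, 3, 4)                 # shape (n0, n1, n2)
flat = reshaper.prepare_input_data(data)       # shape (5, 12), values (data - 10) / 2

restored = reshaper.prepare_output_data(flat)  # scales back and restores the shape
assert restored.shape == data.shape
assert np.allclose(restored, data)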
Source code in simulai/io.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 class ScalerReshaper ( Reshaper ): \"\"\"ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data.\"\"\" name = \"scalerreshaper\" def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data ) def _get_structured_bias_scale ( self , dtype : np . dtype = None ) -> Tuple [ dict , dict ]: \"\"\"Get the bias and scale values for each field of a structured array. Args: dtype (np.dtype, optional): (Default value = None) Returns: Tuple[dict, dict]: A tuple of two dictionaries, the first containing the bias values for each field and the second Note: If the bias and scale attributes are floats, they will be used for all fields. 
Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper._get_structured_bias_scale(np.dtype([('a', float), ('b', float)])) ({'a': 10, 'b': 10}, {'a': 2, 'b': 2}) \"\"\" bias = self . bias if isinstance ( self . bias , float ): bias = { n : self . bias for n in dtype . names } scale = self . scale if isinstance ( self . scale , float ): scale = { n : self . scale for n in dtype . names } return bias , scale def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . dtype . names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_structured_data ( self , data : np . ndarray = None , * args , ** kwargs ) -> np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' (n0, prod(n1, ..., nm)) Parameters: Name Type Description Default bias float (Default value = 0.0) 0.0 scale float (Default value = 1.0) 1.0 channels_last bool (Default value = False) False Source code in simulai/io.py 345 346 347 348 349 350 351 352 353 354 355 356 357 358 def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale prepare_input_data ( data = None , * args , ** kwargs ) # Prepare the input data by subtracting the bias and scaling the data. 
Parameters: Name Type Description Default data Union [ ndarray , recarray ] The input data to be prepared (Default value = None) None *args () **kwargs {} Returns: Type Description ndarray np.ndarray: The prepared input data Note If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) Source code in simulai/io.py 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . prepare_input_structured_data ( data , * args , ** kwargs ) prepare_input_structured_data ( data = None , * args , ** kwargs ) # Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data recarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () Returns: Type Description ndarray np.ndarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) Source code in simulai/io.py 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . dtype . 
names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs ) prepare_output_data ( data = None , * args , ** kwargs ) # Prepare the output data by scaling it and adding the bias. Parameters: Name Type Description Default data Union [ ndarray , recarray ] The output data to be prepared (Default value = None) None *args () Returns: Type Description ndarray np.ndarray: The prepared output data Note If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) Source code in simulai/io.py 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data ) prepare_output_structured_data ( data = None , * args , ** kwargs ) # Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data ndarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () **kwargs {} Returns: Type Description recarray np.recarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. This class is useful for datasets in which there are invalid data. 
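Before the full listing, a small plain-NumPy sketch of the selection rule may help: MapValid keeps, for every sample, only the positions that are valid (not equal to the mask) in the first sample, and the invalid positions are assumed to be static along the sample axis. The snippet below only illustrates this rule for a NaN mask and is not a substitute for the class itself: >>> import numpy as np >>> data = np.array([[1.0, np.nan, 3.0], ... [4.0, np.nan, 6.0]]) >>> valid = np.where(~np.isnan(data[0, ...])) # valid positions taken from the first sample >>> data[(slice(0, data.shape[0]),) + valid] # F: (n0, n1, ..., nm) -> (n0, n_valids) array([[1., 3.], [4., 6.]])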
Source code in simulai/io.py 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 class MapValid ( Reshaper ): \"\"\"MapValid is a reshaper class that converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. This class is useful for datasets in which there are invalid data. \"\"\" name = \"map_valid\" def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . channels_last = channels_last def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... ] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . 
shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ] def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data ) __init__ ( config = None , mask = None , channels_last = True ) # Initialize the MapValid class with the configurations and mask passed as parameters. 
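One practical detail worth noting before the parameter table: the constructor iterates over config.items(), so config must be a dictionary (an empty one at minimum) rather than the default None. A minimal, hedged construction sketch: >>> import numpy as np >>> reshaper = MapValid(config={}, mask=np.nan) Additional attributes consumed later (for instance n_features, which is read by prepare_output_data) can be injected through this dictionary, since every key/value pair in config is set as an attribute on the instance.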
Parameters: Name Type Description Default config dict configurations dictionary, by default None None mask ( int , NaN , inf , optional ) mask to select the invalid values, by default None None channels_last bool if set to True, move the channel dimension to the last, by default True True Source code in simulai/io.py 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . channels_last = channels_last prepare_input_data ( data = None ) # Internal input data preparer, executed for each label of the structured array Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) Source code in simulai/io.py 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... 
] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ] prepare_input_structured_data ( data = None ) # This function is used to prepare structured input data for further processing. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data ) prepare_output_data ( data = None ) # Prepare output data for the MapValid operation. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], Source code in simulai/io.py 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . 
NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data prepare_output_structured_data ( data = None ) # This function is used to prepare structured output data for further processing. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data ) Sampling # Bases: DataPreparer This class is used for sampling data from the input dataset. Source code in simulai/io.py 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 class Sampling ( DataPreparer ): \"\"\"This class is used for sampling data from the input dataset.\"\"\" name = \"sampling\" def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None @property def indices ( self ) -> list : \"\"\"Returns the indices of the data that have been sampled. Returns: list: The indices of the data that have been sampled. Raises: AssertionError: If the indices have not been generated yet. Note: The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. 
Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1] \"\"\" assert self . sampled_indices is not None , ( \"The indices still were not generate.\" \"Run prepare_input_data or prepare_input_structured_data for getting them.\" ) return sorted ( self . sampled_indices . tolist ()) def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ] def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. - When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . 
shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data indices : list property # Returns the indices of the data that have been sampled. Returns: Name Type Description list list The indices of the data that have been sampled. Raises: Type Description AssertionError If the indices have not been generated yet. Note The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1] __init__ ( choices_fraction = 0.1 , shuffling = False ) # Initializes the Sampling class. Parameters: Name Type Description Default choices_fraction float The fraction of the dataset to be sampled, by default 0.1 0.1 shuffling bool Whether to shuffle the data before sampling, by default False False Source code in simulai/io.py 711 712 713 714 715 716 717 718 719 720 721 722 723 724 def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None prepare_input_data ( data = None , data_interval = None ) # Prepare input data for sampling. Parameters: Name Type Description Default data ndarray The input data. Default is None. None data_interval list The interval of data that should be selected. Default is None, None Returns: Type Description ndarray numpy.ndarray: The sampled data. Note: The data_interval parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) Source code in simulai/io.py 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . 
shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ] prepare_input_structured_data ( data = None , data_interval = None , batch_size = None , dump_path = None ) # Prepares structured data for further processing. Parameters: Name Type Description Default data Dataset Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) None data_interval list The interval of the data to be prepared, the default shape is [0, data.shape[0]] None batch_size int The size of the batches to be processed, defaults to None None dump_path str (Default value = None) None Returns: Type Description recarray np.recarray: Note The features dimensions of the input data should be 1 in NumPy structured arrays. When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) Source code in simulai/io.py 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. - When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . 
shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data MovingWindow # MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) Source code in simulai/io.py 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 class MovingWindow : r \"\"\"MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . 
full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \" def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . ndarray ( time_series ) def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch def get_last_item ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :] def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . 
stack ( output_batches_list , 0 ) return input_data , output_data __call__ ( input_data = None , output_data = None ) # Apply Moving Window over the input data Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note It is expected that the input_data and output_data have the same shape This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) Source code in simulai/io.py 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . 
stack ( output_batches_list , 0 ) return input_data , output_data __init__ ( history_size = None , skip_size = 1 , horizon_size = None , full_output = True ) # Initializes the MovingWindow class Parameters: Name Type Description Default history_size int the size of the history window, by default None None skip_size int the number of steps to skip between windows, by default 1 1 horizon_size int the size of the horizon window, by default None None full_output bool flag to use the full output or only the last item, by default True True Source code in simulai/io.py 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \" bypass ( batch ) # Does nothing, returns the input batch. Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: the input array Source code in simulai/io.py 955 956 957 958 959 960 961 962 963 964 965 def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch get_last_item ( batch ) # Get the last item of a batch Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: Note This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) Source code in simulai/io.py 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 def get_last_item ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :] transform ( time_series ) # Applies the moving window over the time_series array. Parameters: Name Type Description Default time_series ndarray required Returns: Type Description ndarray np.ndarray: the transformed array with the windows. Source code in simulai/io.py 943 944 945 946 947 948 949 950 951 952 953 def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . 
ndarray ( time_series ) SlidingWindow # SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: Name Type Description history_size int The number of history samples to include in each window. skip_size int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] Source code in simulai/io.py 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 class SlidingWindow : r \"\"\"SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: history_size : int The number of history samples to include in each window. skip_size : int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. (Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \" def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. 
Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data __call__ ( input_data = None , output_data = None ) # Applies a sliding window operation on the given time series and returns the windowed samples. 
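As an additional sketch of typical shapes (inferred from the loop in the source above, with the import path assumed; treat it as an approximation rather than a verified run): >>> import numpy as np >>> from simulai.io import SlidingWindow # import path assumed >>> series = np.random.rand(10, 3) >>> window = SlidingWindow(history_size=3, skip_size=3) >>> inputs, targets = window(input_data=series, output_data=series) >>> inputs.shape, targets.shape ((2, 3, 3), (2, 3, 3)) Each target window is the corresponding input window shifted forward by skip_size steps, which is what keeps consecutive history/horizon pairs from intersecting.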
Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note history_size and horizon_size should be positive integers history_size should be less than the length of input_data input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) Source code in simulai/io.py 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data __init__ ( history_size = None , skip_size = None ) # Initialize the SlidingWindow object. Parameters: Name Type Description Default history_size int The number of history samples to include in each window. (Default value = None) None skip_size int The number of samples to skip between each window. 
(Default value = None) None Source code in simulai/io.py 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. (Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \" apply ( time_series ) # Applies the sliding window to the given time series. Parameters: Name Type Description Default time_series List [ int ] required Returns: Type Description List [ List [ int ]] List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] Source code in simulai/io.py 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples IntersectingBatches # IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes. Source code in simulai/io.py 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 class IntersectingBatches : r \"\"\"IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes.\"\"\" def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. (Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . 
full = full def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m ) def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size <= data_size : input_batch = input_data [ center : center + self . batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ]) __call__ ( input_data = None ) # Applies the batching strategy to the input data. Parameters: Name Type Description Default input_data ndarray (Default value = None) None Returns: Type Description Union [ list , ndarray ] Union[list, np.ndarray]: A list of batches or a single batch if full attribute is set to False. Note: - If the full attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] Source code in simulai/io.py 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . 
batch_size <= data_size : input_batch = input_data [ center : center + self . batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ]) __init__ ( skip_size = 1 , batch_size = None , full = True ) # Initializes the IntersectingBatches class Parameters: Name Type Description Default skip_size int Number of samples to skip between two windows. (Default value = 1) 1 batch_size int Number of samples to use in each batch. (Default value = None) None full bool Whether to include the last batch or not, even if it's not full. (Default value = True) True Source code in simulai/io.py 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. (Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . full = full get_indices ( dim = None ) # It gets just the indices of the shifting Parameters: Name Type Description Default dim int total dimension (Default value = None) None Returns: Type Description ndarray np.ndarray: the shifted indices Source code in simulai/io.py 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m ) BatchwiseExtrapolation # BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. 
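A minimal sketch of the extrapolation loop with a toy operator (the import path and the operator itself are illustrative assumptions; in practice op would wrap a trained time-series model mapping a state of shape (1, history_size, n_features) to the next predicted block):
>>> import numpy as np
>>> from simulai.io import BatchwiseExtrapolation  # assumed import path
>>> history_size, horizon_size = 10, 5
>>> op = lambda state: state[:, -horizon_size:, :]  # toy surrogate: repeats the latest horizon block
>>> extrapolator = BatchwiseExtrapolation(op=op)
>>> init_state = np.random.rand(1, history_size, 3)
>>> dataset = extrapolator(init_state=init_state, history_size=history_size,
...                        horizon_size=horizon_size, testing_data_size=50)
>>> dataset.shape  # testing_data_size rows, one column per series
(50, 3)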
Attributes: Name Type Description time_id int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape Source code in simulai/io.py 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 class BatchwiseExtrapolation : r \"\"\"BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. Attributes: time_id : int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape \"\"\" def __init__ ( self , op : callable = None , auxiliary_data : np . ndarray = None ) -> None : self . op = op self . auxiliary_data = auxiliary_data self . time_id = 0 def _simple_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . ndarray : r \"\"\"Given the current extrapolation dataset, use the last history_size number of rows to create the next state of the dataset. Args: extrapolation_dataset (np.ndarray): The current state of the extrapolation dataset. history_size (int, optional): (Default value = 0) Returns: np.ndarray: The next state of the extrapolation dataset. \"\"\" return extrapolation_dataset [ None , - history_size :, :] def _forcing_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . ndarray : return np . hstack ( [ extrapolation_dataset [ - history_size :, :], self . auxiliary_data [ self . time_id - history_size : self . time_id , :], ] )[ None , :, :] def __call__ ( self , init_state : np . ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. 
(Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset __call__ ( init_state = None , history_size = None , horizon_size = None , testing_data_size = None ) # A function that performs the extrapolation of the time series. Parameters: Name Type Description Default init_state ndarray initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) None history_size int the size of the history window used in the extrapolation. (Default value = None) None horizon_size int the size of the horizon window used in the extrapolation. (Default value = None) None testing_data_size int (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) Source code in simulai/io.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 def __call__ ( self , init_state : np . 
ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. (Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset BatchCopy # A class for copying data in batches and applying a transformation function. 
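A brief sketch of the public copy entry point, mirroring the internal example further below; the file names and the presence of a structured dataset called \"data\" are assumptions made only for illustration:
>>> import h5py
>>> from simulai.io import BatchCopy  # assumed import path
>>> fp = h5py.File(\"data_file.h5\", \"r\")  # hypothetical file holding a structured dataset \"data\"
>>> batch_copy = BatchCopy()
>>> dset = batch_copy.copy(data=fp[\"data\"], data_interval=[0, 100000], batch_size=1000,
...                        dump_path=\"data_copy.h5\", transformation=lambda d: 2.0 * d)  # doubles each chunk while copying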
Source code in simulai/io.py 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 class BatchCopy : r \"\"\"A class for copying data in batches and applying a transformation function.\"\"\" def __init__ ( self , channels_last : bool = False ) -> None : self . channels_last = channels_last def _single_copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy data from a single h5py.Dataset to another h5py.Dataset in batches. Args: data (h5py.Dataset, optional): (Default value = None) data_interval (list, optional): The interval of the data to be copied. (Default value = None) batch_size (int, optional): The size of the batch to be copied. (Default value = None) dump_path (str, optional): The path where the new h5py.Dataset will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The new h5py.Dataset after the copy process. Note: - Copy data from data_file.h5/data to data_copy.h5/data with a batch size of 1000: - The input must be an h5py.Dataset. Example: >>> data = h5py.File(\"data_file.h5\", \"r\") >>> batch_copy = BatchCopy() >>> dset = batch_copy._single_copy(data=data[\"data\"], data_interval=[0, 100000], batch_size=1000, dump_path=\"data_copy.h5\") \"\"\" assert isinstance ( data , h5py . Dataset ), \"The input must be h5py.Dataset\" variables_list = data . dtype . names data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def _multiple_copy ( self , data : list = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy and concatenate multiple h5py.Dataset objects into a single h5py.Dataset object. Args: data (list, optional): A list of h5py.Dataset objects to be concatenated. (Default value = None) data_interval (list, optional): A list of two integers indicating the start and end index of the data to be concatenated. (Default value = None) batch_size (int, optional): The number of samples to be processed at a time. 
(Default value = None) dump_path (str, optional): The file path where the concatenated h5py.Dataset object will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The concatenated h5py.Dataset object. \"\"\" assert all ( [ isinstance ( di , h5py . Dataset ) for di in data ] ), \"All inputs must be h5py.Dataset\" variables_list = sum ([ list ( di . dtype . names ) for di in data ], []) data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data [ 0 ] . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) copy ( data = None , data_interval = None , batch_size = None , dump_path = None , transformation = lambda : data ) # Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Parameters: Name Type Description Default data Dataset input data to be copied (Default value = None) None data_interval list the range of the data to be copied (Default value = None) None batch_size int the size of the batches to be used to copy the data (Default value = None) None dump_path str the path of the file where the data will be copied (Default value = None) None transformation callable (Default value = lambda data: data) lambda : data Returns: Type Description Dataset h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the _multiple_copy function. 
Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) Source code in simulai/io.py 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) MakeTensor # This class is used to make torch tensors from numpy arrays or dictionaries. Parameters: Name Type Description Default input_names List [ str ] list of input names. None output_names List [ str ] list of output names. None Note input_tensors will be a list of tensors in case of numpy array and dictionary inputs. The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. The input_data will be converted to float32 dtype. The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. If input_data is None, it will raise an exception. 
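Because of the column-wise split performed internally, a (batch_size, n) array comes back as n tensors of shape (batch_size, 1), each with requires_grad enabled and placed on the requested device; a short sketch assuming the class is importable from simulai.io:
>>> import numpy as np
>>> from simulai.io import MakeTensor  # assumed import path
>>> mt = MakeTensor(input_names=[\"x\", \"y\", \"z\"])
>>> tensors = mt(np.random.rand(10, 3), device=\"cpu\")
>>> len(tensors), tensors[0].shape, tensors[0].requires_grad
(3, torch.Size([10, 1]), True)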
Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) Source code in simulai/io.py 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 class MakeTensor : r \"\"\"This class is used to make torch tensors from numpy arrays or dictionaries. Args: input_names (List[str]): list of input names. output_names (List[str]): list of output names. Note: - input_tensors will be a list of tensors in case of numpy array and dictionary inputs. - The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. - The input_data will be converted to float32 dtype. - The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. - If input_data is None, it will raise an exception. Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) \"\"\" def __init__ ( self , input_names = None , output_names = None ): self . input_names = input_names self . output_names = output_names def _make_tensor ( self , input_data : np . ndarray = None , device : str = \"cpu\" ) -> List [ torch . Tensor ]: r \"\"\"Convert input_data to a list of torch tensors. Args: input_data (np.ndarray, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: List[torch.Tensor]: list of tensors. \"\"\" inputs_list = list ( torch . split ( input_data , 1 , dim =- 1 )) for vv , var in enumerate ( inputs_list ): var . requires_grad = True var = var . to ( device ) inputs_list [ vv ] = var # var = var[..., None] return inputs_list def _make_tensor_dict ( self , input_data : dict = None , device : str = \"cpu\" ) -> dict : r \"\"\"Convert input_data to a dictionary of torch tensors. Args: input_data (dict, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: dict: dictionary of tensors. \"\"\" inputs_dict = dict () for key , item in input_data . items (): item . requires_grad = True item = item . to ( device ) inputs_dict [ key ] = item return inputs_dict def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . 
Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" ) __call__ ( input_data = None , device = 'cpu' ) # Make tensors from input_data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor , Dict [ str , ndarray ]] input data to be converted. (Default value = None) None device str (Default value = \"cpu\") 'cpu' Returns: Type Description List [ Tensor ] Union[List[torch.Tensor], dict]: Raises: Type Description - Exception Source code in simulai/io.py 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" ) GaussianNoise # Bases: Dataset GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) Source code in simulai/io.py 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 class GaussianNoise ( Dataset ): r \"\"\"GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) \"\"\" def __init__ ( self , stddev : float = 0.01 , input_data : Union [ np . ndarray , Tensor ] = None ): super ( Dataset , self ) . __init__ () self . 
stddev = stddev if isinstance ( input_data , np . ndarray ): input_data_ = torch . from_numpy ( input_data . astype ( \"float32\" )) else : input_data_ = input_data self . input_data = input_data_ self . data_shape = tuple ( self . input_data . shape ) def size ( self ): return self . data_shape def __call__ ( self ): return ( 1 + self . stddev * torch . randn ( * self . data_shape )) * self . input_data Tokenizer # Wrapper for multiple tokenization approaches Source code in simulai/io.py 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 class Tokenizer : \"\"\"Wrapper for multiple tokenization approaches\"\"\" def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" ) def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs ) def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs ) def _make_time_input_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None , step : float = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) step (float): Size of the timestep. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized input dataset. \"\"\" dim = num_step src = np . repeat ( np . expand_dims ( src , axis = 1 ), dim , axis = 1 ) src_shape = src . shape src_shape_list = list ( src_shape ) src_shape_list [ - 1 ] += 1 src_final = np . zeros ( tuple ( src_shape_list )) src_final [:, :, : - 1 ] = src for i in range ( num_step ): src_final [:, i , - 1 ] += step * i return src_final [: - num_step + 1 ] def _make_time_target_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized target dataset. \"\"\" moving_window = MovingWindow ( history_size = 1 , skip_size = 1 , horizon_size = num_step - 1 ) input_data , output_data = moving_window ( input_data = src , output_data = src ) return np . 
concatenate ([ input_data , output_data ], axis = 1 ) __init__ ( kind = 'time_indexer' ) # Parameters: Name Type Description Default kind str The kind of tokenization to be used. (Default value = \"time_indexer\") 'time_indexer' Source code in simulai/io.py 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" ) generate_input_tokens ( input_data , ** kwargs ) # Generating the input sequence of tokens. Source code in simulai/io.py 1810 1811 1812 1813 1814 def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs ) generate_target_tokens ( target_data , ** kwargs ) # Generating the target sequence of tokens. Source code in simulai/io.py 1816 1817 1818 1819 1820 def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs )","title":"Simulai io"},{"location":"simulai_io/#simulaiio","text":"","title":"simulai.io"},{"location":"simulai_io/#bypasspreparer","text":"Bases: DataPreparer ByPass class, it fills the DataPreparer blank, but does nothing. Source code in simulai/io.py 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 class ByPassPreparer ( DataPreparer ): \"\"\"ByPass class, it fills the DataPreparer blank, but does nothing.\"\"\" name = \"no_preparer\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . collapsible_shapes = None self . dtype = None def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. 
Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data def prepare_output_structured_data ( self , data : np . ndarray ) -> np . recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', '>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) Source code in simulai/io.py 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.ByPassPreparer.prepare_input_structured_data","text":"Prepare structured input data by converting it to an ndarray. Parameters: Name Type Description Default data recarray required Returns: Type Description ndarray np.ndarray: numpy ndarray version of the input data. Note This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) Source code in simulai/io.py 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.ByPassPreparer.prepare_output_data","text":"Prepare output data. 
Parameters: Name Type Description Default data ndarray required Returns: Type Description ndarray numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) Source code in simulai/io.py 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.ByPassPreparer.prepare_output_structured_data","text":"Prepare structured output data by converting it to a recarray. Parameters: Name Type Description Default data ndarray required Returns: Type Description recarray np.recarray: numpy recarray version of the output data. Note This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' np . recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' (n0, prod(n1, ..., nm)) Source code in simulai/io.py 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class Reshaper ( DataPreparer ): \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm))\"\"\" name = \"reshaper\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . collapsible_shapes = None self . collapsed_shape = None self . dtype = None self . n_features = None def _set_shapes_from_data ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): The input data to reshape. 
(Default value = None) Example: >>> reshaper = Reshaper() >>> reshaper._set_shapes_from_data(np.random.random((10,3,4,5))) >>> reshaper.collapsible_shapes (3, 4, 5) \"\"\" self . collapsible_shapes = data . shape [ 1 :] self . collapsed_shape = np . prod ( self . collapsible_shapes ) . astype ( int ) self . _is_recarray = data . dtype . names is not None if self . _is_recarray : self . n_features = len ( data . dtype . names ) * self . collapsed_shape else : self . n_features = self . collapsed_shape def _prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function reshapes the input data to (n0, prod(n1, ..., nm)) shape. Example: >>> reshaper = Reshaper() >>> data = np.random.random((10,3,4,5)) >>> reshaper.prepare_input_data(data) array([[0.527, 0.936, ... , 0.812], [0.947, 0.865, ... , 0.947], ..., [0.865, 0.947, ... , 0.865], [0.947, 0.865, ... , 0.947]]) \"\"\" assert len ( data . shape ) > 1 , \"Error! data must have at least two dimensions\" return data . reshape (( data . shape [ 0 ], self . n_features )) def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data ) def _reshape_to_output ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Reshape the data to its original shape before reshaping. Args: data (np.ndarray): Returns: np.ndarray: Note: The original shape of the data is stored in `collapsible_shapes` attribute. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper._set_shapes_from_data(input_data) >>> reshaped_data = reshaper._reshape_to_output(input_data.flatten()) >>> reshaped_data.shape (2, 3, 4) \"\"\" return data . reshape (( data . shape [ 0 ],) + self . collapsible_shapes ) def _prepare_output_data ( self , data : np . ndarray = None , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray, optional): The input data to be prepared, by default None single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" if self . _is_recarray : return self . _prepare_output_structured_data ( data ) else : return self . _reshape_to_output ( data ) def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . 
_prepare_output_data ( data ) def _prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" self . dtype = data . dtype self . _set_shapes_from_data ( data ) data_ = recfunctions . structured_to_unstructured ( data ) reshaped_data_ = self . _prepare_input_data ( data_ ) return reshaped_data_ def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data ) def _prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" data = data . reshape ( ( data . shape [ 0 ],) + self . collapsible_shapes + ( len ( self . dtype ),) ) output_data = recfunctions . unstructured_to_structured ( data , self . dtype ) output_data = self . _reshape_to_output ( output_data ) return output_data","title":"Reshaper"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_input_data","text":"Prepare input data for reshaping. Parameters: Name Type Description Default data Union [ ndarray , recarray ] required Returns: Type Description ndarray np.ndarray: Note If data is a structured numpy array, it will be passed to _prepare_input_structured_data function. If data is a plain numpy array, it will be passed to _prepare_input_data function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) Source code in simulai/io.py 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. 
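A small sketch of the full flatten-and-restore cycle, assuming Reshaper is importable from simulai.io; prepare_input_data collapses every axis after the first one and prepare_output_data restores the original shape recorded in collapsible_shapes:
>>> import numpy as np
>>> from simulai.io import Reshaper  # assumed import path
>>> reshaper = Reshaper()
>>> data = np.random.rand(10, 3, 4, 5)
>>> flat = reshaper.prepare_input_data(data)  # (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm))
>>> flat.shape
(10, 60)
>>> reshaper.prepare_output_data(flat).shape  # restored from collapsible_shapes
(10, 3, 4, 5)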
Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data )","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_input_structured_data","text":"Prepare the input structured data to be in the shape and format expected by the model. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: The prepared input structured data Source code in simulai/io.py 297 298 299 300 301 302 303 304 305 306 307 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data )","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_output_data","text":"Prepare the input data to be in the shape and format expected by the model. Parameters: Name Type Description Default data ndarray The input data to be prepared required single bool (Default value = False) False Returns: Type Description ndarray np.ndarray: The prepared input data Source code in simulai/io.py 268 269 270 271 272 273 274 275 276 277 278 279 def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . _prepare_output_data ( data )","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_output_structured_data","text":"Prepare the output data to be in the shape and format expected by the user. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description recarray np.recarray: The prepared output structured data Source code in simulai/io.py 309 310 311 312 313 314 315 316 317 318 319 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data )","title":"prepare_output_structured_data()"},{"location":"simulai_io/#scalerreshaper","text":"Bases: Reshaper ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data. 
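In practice this means the affine map (data - bias) / scale is applied before the flattening step, and the inverse map scale * data + bias is applied when the output is reshaped back; a short sketch under the same import assumption:
>>> import numpy as np
>>> from simulai.io import ScalerReshaper  # assumed import path
>>> reshaper = ScalerReshaper(bias=10.0, scale=2.0)
>>> data = np.full((4, 2, 3), 12.0)
>>> flat = reshaper.prepare_input_data(data)  # subtract bias, divide by scale, then flatten
>>> flat.shape, float(flat[0, 0])
((4, 6), 1.0)
>>> restored = reshaper.prepare_output_data(flat)  # rescale, add bias, restore the shape
>>> restored.shape, float(restored[0, 0, 0])
((4, 2, 3), 12.0)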
Source code in simulai/io.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 class ScalerReshaper ( Reshaper ): \"\"\"ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data.\"\"\" name = \"scalerreshaper\" def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data ) def _get_structured_bias_scale ( self , dtype : np . dtype = None ) -> Tuple [ dict , dict ]: \"\"\"Get the bias and scale values for each field of a structured array. Args: dtype (np.dtype, optional): (Default value = None) Returns: Tuple[dict, dict]: A tuple of two dictionaries, the first containing the bias values for each field and the second Note: If the bias and scale attributes are floats, they will be used for all fields. 
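The note above states that scalar bias and scale values are broadcast to every field of a structured dtype; a minimal sketch of that broadcasting logic, with field names assumed for illustration:

import numpy as np

dtype = np.dtype([("a", float), ("b", float)])
bias, scale = 10.0, 2.0

# Broadcast the scalars to one entry per field, mirroring _get_structured_bias_scale.
bias_per_field = {name: bias for name in dtype.names}
scale_per_field = {name: scale for name in dtype.names}
# -> ({'a': 10.0, 'b': 10.0}, {'a': 2.0, 'b': 2.0})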
Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper._get_structured_bias_scale(np.dtype([('a', float), ('b', float)])) ({'a': 10, 'b': 10}, {'a': 2, 'b': 2}) \"\"\" bias = self . bias if isinstance ( self . bias , float ): bias = { n : self . bias for n in dtype . names } scale = self . scale if isinstance ( self . scale , float ): scale = { n : self . scale for n in dtype . names } return bias , scale def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . dtype . names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_structured_data ( self , data : np . ndarray = None , * args , ** kwargs ) -> np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' (n0, prod(n1, ..., nm)) Parameters: Name Type Description Default bias float (Default value = 0.0) 0.0 scale float (Default value = 1.0) 1.0 channels_last bool (Default value = False) False Source code in simulai/io.py 345 346 347 348 349 350 351 352 353 354 355 356 357 358 def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale","title":"__init__()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_input_data","text":"Prepare the input data by subtracting the bias and scaling the data. 
Parameters: Name Type Description Default data Union [ ndarray , recarray ] The input data to be prepared (Default value = None) None *args () **kwargs {} Returns: Type Description ndarray np.ndarray: The prepared input data Note If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) Source code in simulai/io.py 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . prepare_input_structured_data ( data , * args , ** kwargs )","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_input_structured_data","text":"Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data recarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () Returns: Type Description ndarray np.ndarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) Source code in simulai/io.py 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . 
dtype . names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs )","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_output_data","text":"Prepare the output data by scaling it and adding the bias. Parameters: Name Type Description Default data Union [ ndarray , recarray ] The output data to be prepared (Default value = None) None *args () Returns: Type Description ndarray np.ndarray: The prepared output data Note If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) Source code in simulai/io.py 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data )","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_output_structured_data","text":"Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data ndarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () **kwargs {} Returns: Type Description recarray np.recarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. 
]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. This class is useful for datasets in which there are invalid data. Source code in simulai/io.py 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 class MapValid ( Reshaper ): \"\"\"MapValid is a reshaper class that converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. This class is useful for datasets in which there are invalid data. \"\"\" name = \"map_valid\" def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . channels_last = channels_last def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . 
where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... ] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ] def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data )","title":"MapValid"},{"location":"simulai_io/#simulai.io.MapValid.__init__","text":"Initialize the MapValid class with the configurations and mask passed as parameters. 
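A plain-NumPy sketch of the valid-values mapping that MapValid implements: the valid positions are read from the first sample only and then applied to every sample. The mask value and the small array below are assumptions for illustration.

import numpy as np

data = np.array([[1.0, np.nan, 3.0],
                 [4.0, np.nan, 6.0]])   # the invalid column is static across samples

# Valid positions are taken from the first sample.
valid_indices = np.where(~np.isnan(data[0, ...]))

# F: (n0, n1, ..., nm) -> (n0, n_valids)
compressed = data[(slice(0, data.shape[0]),) + valid_indices]
# compressed.shape == (2, 2)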
Parameters: Name Type Description Default config dict configurations dictionary, by default None None mask ( int , NaN , inf , optional ) mask to select the invalid values, by default None None channels_last bool if set to True, move the channel dimension to the last, by default True True Source code in simulai/io.py 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . channels_last = channels_last","title":"__init__()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_input_data","text":"Internal input data preparer, executed for each label of the structured array Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) Source code in simulai/io.py 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . 
mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... ] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ]","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_input_structured_data","text":"This function is used to prepare structured input data for further processing. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data )","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_output_data","text":"Prepare output data for the MapValid operation. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], Source code in simulai/io.py 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. 
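The inverse step, sketched in plain NumPy: a full-shape array is pre-filled with the mask (here NaN) and the compressed values are written back at the valid positions, which is what prepare_output_data does. The shapes continue the small forward sketch given earlier.

import numpy as np

n_samples, n_features = 2, 3
valid_indices = (np.array([0, 2]),)
compressed = np.array([[1.0, 3.0],
                       [4.0, 6.0]])

# Pre-fill with the mask, then scatter the valid values back into place.
restored = np.full((n_samples, n_features), np.nan)
restored[(slice(0, n_samples),) + valid_indices] = compressed
# restored == [[1., nan, 3.], [4., nan, 6.]]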
Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_output_structured_data","text":"This function is used to prepare structured output data for further processing. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data )","title":"prepare_output_structured_data()"},{"location":"simulai_io/#sampling","text":"Bases: DataPreparer This class is used for sampling data from the input dataset. Source code in simulai/io.py 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 class Sampling ( DataPreparer ): \"\"\"This class is used for sampling data from the input dataset.\"\"\" name = \"sampling\" def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . 
choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None @property def indices ( self ) -> list : \"\"\"Returns the indices of the data that have been sampled. Returns: list: The indices of the data that have been sampled. Raises: AssertionError: If the indices have not been generated yet. Note: The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1] \"\"\" assert self . sampled_indices is not None , ( \"The indices still were not generate.\" \"Run prepare_input_data or prepare_input_structured_data for getting them.\" ) return sorted ( self . sampled_indices . tolist ()) def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ] def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. - When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . 
global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data","title":"Sampling "},{"location":"simulai_io/#simulai.io.Sampling.indices","text":"Returns the indices of the data that have been sampled. Returns: Name Type Description list list The indices of the data that have been sampled. Raises: Type Description AssertionError If the indices have not been generated yet. Note The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1]","title":"indices"},{"location":"simulai_io/#simulai.io.Sampling.__init__","text":"Initializes the Sampling class. Parameters: Name Type Description Default choices_fraction float The fraction of the dataset to be sampled, by default 0.1 0.1 shuffling bool Whether to shuffle the data before sampling, by default False False Source code in simulai/io.py 711 712 713 714 715 716 717 718 719 720 721 722 723 724 def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None","title":"__init__()"},{"location":"simulai_io/#simulai.io.Sampling.prepare_input_data","text":"Prepare input data for sampling. Parameters: Name Type Description Default data ndarray The input data. Default is None. None data_interval list The interval of data that should be selected. Default is None, None Returns: Type Description ndarray numpy.ndarray: The sampled data. Note: The data_interval parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) Source code in simulai/io.py 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . 
ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ]","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.Sampling.prepare_input_structured_data","text":"Prepares structured data for further processing. Parameters: Name Type Description Default data Dataset Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) None data_interval list The interval of the data to be prepared, the default shape is [0, data.shape[0]] None batch_size int The size of the batches to be processed, defaults to None None dump_path str (Default value = None) None Returns: Type Description recarray np.recarray: Note The features dimensions of the input data should be 1 in NumPy structured arrays. When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) Source code in simulai/io.py 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. 
- When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data","title":"prepare_input_structured_data()"},{"location":"simulai_io/#movingwindow","text":"MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) Source code in simulai/io.py 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 class MovingWindow : r \"\"\"MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. 
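A stand-alone NumPy sketch of the windowing rule documented for this class: each input window holds history_size consecutive rows, the target holds the following horizon_size rows, and the window start advances by skip_size. The sizes and the toy series are assumptions for illustration.

import numpy as np

history_size, horizon_size, skip_size = 2, 1, 1
series = np.arange(12.0).reshape(6, 2)          # (n_timesteps, n_variables)

inputs, targets = [], []
center = history_size
while center + horizon_size <= series.shape[0]:
    inputs.append(series[center - history_size:center, :])    # history block
    targets.append(series[center:center + horizon_size, :])   # horizon block
    center += skip_size

inputs = np.stack(inputs, 0)    # (n_windows, history_size, n_variables)
targets = np.stack(targets, 0)  # (n_windows, horizon_size, n_variables)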
See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \" def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . ndarray ( time_series ) def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch def get_last_item ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :] def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . 
history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"MovingWindow"},{"location":"simulai_io/#simulai.io.MovingWindow.__call__","text":"Apply Moving Window over the input data Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note It is expected that the input_data and output_data have the same shape This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) Source code in simulai/io.py 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . 
append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"__call__()"},{"location":"simulai_io/#simulai.io.MovingWindow.__init__","text":"Initializes the MovingWindow class Parameters: Name Type Description Default history_size int the size of the history window, by default None None skip_size int the number of steps to skip between windows, by default 1 1 horizon_size int the size of the horizon window, by default None None full_output bool flag to use the full output or only the last item, by default True True Source code in simulai/io.py 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \"","title":"__init__()"},{"location":"simulai_io/#simulai.io.MovingWindow.bypass","text":"Does nothing, returns the input batch. Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: the input array Source code in simulai/io.py 955 956 957 958 959 960 961 962 963 964 965 def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch","title":"bypass()"},{"location":"simulai_io/#simulai.io.MovingWindow.get_last_item","text":"Get the last item of a batch Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: Note This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) Source code in simulai/io.py 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 def get_last_item ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :]","title":"get_last_item()"},{"location":"simulai_io/#simulai.io.MovingWindow.transform","text":"Applies the moving window over the time_series array. 
Parameters: Name Type Description Default time_series ndarray required Returns: Type Description ndarray np.ndarray: the transformed array with the windows. Source code in simulai/io.py 943 944 945 946 947 948 949 950 951 952 953 def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . ndarray ( time_series )","title":"transform()"},{"location":"simulai_io/#slidingwindow","text":"SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: Name Type Description history_size int The number of history samples to include in each window. skip_size int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] Source code in simulai/io.py 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 class SlidingWindow : r \"\"\"SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: history_size : int The number of history samples to include in each window. skip_size : int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. 
(Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \" def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"SlidingWindow"},{"location":"simulai_io/#simulai.io.SlidingWindow.__call__","text":"Applies a sliding window operation on the given time series and returns the windowed samples. 
Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note history_size and horizon_size should be positive integers history_size should be less than the length of input_data input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) Source code in simulai/io.py 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"__call__()"},{"location":"simulai_io/#simulai.io.SlidingWindow.__init__","text":"Initialize the SlidingWindow object. Parameters: Name Type Description Default history_size int The number of history samples to include in each window. (Default value = None) None skip_size int The number of samples to skip between each window. 
(Default value = None) None Source code in simulai/io.py 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. (Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \"","title":"__init__()"},{"location":"simulai_io/#simulai.io.SlidingWindow.apply","text":"Applies the sliding window to the given time series. Parameters: Name Type Description Default time_series List [ int ] required Returns: Type Description List [ List [ int ]] List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] Source code in simulai/io.py 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples","title":"apply()"},{"location":"simulai_io/#intersectingbatches","text":"IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes. Source code in simulai/io.py 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 class IntersectingBatches : r \"\"\"IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes.\"\"\" def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. 
(Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . full = full def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m ) def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size <= data_size : input_batch = input_data [ center : center + self . batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ])","title":"IntersectingBatches"},{"location":"simulai_io/#simulai.io.IntersectingBatches.__call__","text":"Applies the batching strategy to the input data. Parameters: Name Type Description Default input_data ndarray (Default value = None) None Returns: Type Description Union [ list , ndarray ] Union[list, np.ndarray]: A list of batches or a single batch if full attribute is set to False. Note: - If the full attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] Source code in simulai/io.py 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. 
Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size <= data_size : input_batch = input_data [ center : center + self . batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ])","title":"__call__()"},{"location":"simulai_io/#simulai.io.IntersectingBatches.__init__","text":"Initializes the IntersectingBatches class Parameters: Name Type Description Default skip_size int Number of samples to skip between two windows. (Default value = 1) 1 batch_size int Number of samples to use in each batch. (Default value = None) None full bool Whether to include the last batch or not, even if it's not full. (Default value = True) True Source code in simulai/io.py 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. (Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . full = full","title":"__init__()"},{"location":"simulai_io/#simulai.io.IntersectingBatches.get_indices","text":"It gets just the indices of the shifting Parameters: Name Type Description Default dim int total dimension (Default value = None) None Returns: Type Description ndarray np.ndarray: the shifted indices Source code in simulai/io.py 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m )","title":"get_indices()"},{"location":"simulai_io/#batchwiseextrapolation","text":"BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. 
Attributes: Name Type Description time_id int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape Source code in simulai/io.py 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 class BatchwiseExtrapolation : r \"\"\"BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. Attributes: time_id : int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape \"\"\" def __init__ ( self , op : callable = None , auxiliary_data : np . ndarray = None ) -> None : self . op = op self . auxiliary_data = auxiliary_data self . time_id = 0 def _simple_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . ndarray : r \"\"\"Given the current extrapolation dataset, use the last history_size number of rows to create the next state of the dataset. Args: extrapolation_dataset (np.ndarray): The current state of the extrapolation dataset. history_size (int, optional): (Default value = 0) Returns: np.ndarray: The next state of the extrapolation dataset. \"\"\" return extrapolation_dataset [ None , - history_size :, :] def _forcing_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . ndarray : return np . hstack ( [ extrapolation_dataset [ - history_size :, :], self . auxiliary_data [ self . time_id - history_size : self . time_id , :], ] )[ None , :, :] def __call__ ( self , init_state : np . ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. 
(Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset","title":"BatchwiseExtrapolation"},{"location":"simulai_io/#simulai.io.BatchwiseExtrapolation.__call__","text":"A function that performs the extrapolation of the time series. Parameters: Name Type Description Default init_state ndarray initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) None history_size int the size of the history window used in the extrapolation. (Default value = None) None horizon_size int the size of the horizon window used in the extrapolation. (Default value = None) None testing_data_size int (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) Source code in simulai/io.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 def __call__ ( self , init_state : np . 
ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. (Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset","title":"__call__()"},{"location":"simulai_io/#batchcopy","text":"A class for copying data in batches and applying a transformation function. 
Source code in simulai/io.py 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 class BatchCopy : r \"\"\"A class for copying data in batches and applying a transformation function.\"\"\" def __init__ ( self , channels_last : bool = False ) -> None : self . channels_last = channels_last def _single_copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy data from a single h5py.Dataset to another h5py.Dataset in batches. Args: data (h5py.Dataset, optional): (Default value = None) data_interval (list, optional): The interval of the data to be copied. (Default value = None) batch_size (int, optional): The size of the batch to be copied. (Default value = None) dump_path (str, optional): The path where the new h5py.Dataset will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The new h5py.Dataset after the copy process. Note: - Copy data from data_file.h5/data to data_copy.h5/data with a batch size of 1000: - The input must be an h5py.Dataset. Example: >>> data = h5py.File(\"data_file.h5\", \"r\") >>> batch_copy = BatchCopy() >>> dset = batch_copy._single_copy(data=data[\"data\"], data_interval=[0, 100000], batch_size=1000, dump_path=\"data_copy.h5\") \"\"\" assert isinstance ( data , h5py . Dataset ), \"The input must be h5py.Dataset\" variables_list = data . dtype . names data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def _multiple_copy ( self , data : list = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy and concatenate multiple h5py.Dataset objects into a single h5py.Dataset object. Args: data (list, optional): A list of h5py.Dataset objects to be concatenated. (Default value = None) data_interval (list, optional): A list of two integers indicating the start and end index of the data to be concatenated. (Default value = None) batch_size (int, optional): The number of samples to be processed at a time. 
(Default value = None) dump_path (str, optional): The file path where the concatenated h5py.Dataset object will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The concatenated h5py.Dataset object. \"\"\" assert all ( [ isinstance ( di , h5py . Dataset ) for di in data ] ), \"All inputs must be h5py.Dataset\" variables_list = sum ([ list ( di . dtype . names ) for di in data ], []) data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data [ 0 ] . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , )","title":"BatchCopy"},{"location":"simulai_io/#simulai.io.BatchCopy.copy","text":"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Parameters: Name Type Description Default data Dataset input data to be copied (Default value = None) None data_interval list the range of the data to be copied (Default value = None) None batch_size int the size of the batches to be used to copy the data (Default value = None) None dump_path str the path of the file where the data will be copied (Default value = None) None transformation callable (Default value = lambda data: data) lambda : data Returns: Type Description Dataset h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the _multiple_copy function. 
Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) Source code in simulai/io.py 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , )","title":"copy()"},{"location":"simulai_io/#maketensor","text":"This class is used to make torch tensors from numpy arrays or dictionaries. Parameters: Name Type Description Default input_names List [ str ] list of input names. None output_names List [ str ] list of output names. None Note input_tensors will be a list of tensors in case of numpy array and dictionary inputs. The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. The input_data will be converted to float32 dtype. The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. If input_data is None, it will raise an exception. 
Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) Source code in simulai/io.py 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 class MakeTensor : r \"\"\"This class is used to make torch tensors from numpy arrays or dictionaries. Args: input_names (List[str]): list of input names. output_names (List[str]): list of output names. Note: - input_tensors will be a list of tensors in case of numpy array and dictionary inputs. - The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. - The input_data will be converted to float32 dtype. - The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. - If input_data is None, it will raise an exception. Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) \"\"\" def __init__ ( self , input_names = None , output_names = None ): self . input_names = input_names self . output_names = output_names def _make_tensor ( self , input_data : np . ndarray = None , device : str = \"cpu\" ) -> List [ torch . Tensor ]: r \"\"\"Convert input_data to a list of torch tensors. Args: input_data (np.ndarray, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: List[torch.Tensor]: list of tensors. \"\"\" inputs_list = list ( torch . split ( input_data , 1 , dim =- 1 )) for vv , var in enumerate ( inputs_list ): var . requires_grad = True var = var . to ( device ) inputs_list [ vv ] = var # var = var[..., None] return inputs_list def _make_tensor_dict ( self , input_data : dict = None , device : str = \"cpu\" ) -> dict : r \"\"\"Convert input_data to a dictionary of torch tensors. Args: input_data (dict, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: dict: dictionary of tensors. \"\"\" inputs_dict = dict () for key , item in input_data . items (): item . requires_grad = True item = item . to ( device ) inputs_dict [ key ] = item return inputs_dict def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . 
Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" )","title":"MakeTensor"},{"location":"simulai_io/#simulai.io.MakeTensor.__call__","text":"Make tensors from input_data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor , Dict [ str , ndarray ]] input data to be converted. (Default value = None) None device str (Default value = \"cpu\") 'cpu' Returns: Type Description List [ Tensor ] Union[List[torch.Tensor], dict]: Raises: Type Description - Exception Source code in simulai/io.py 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" )","title":"__call__()"},{"location":"simulai_io/#gaussiannoise","text":"Bases: Dataset GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) Source code in simulai/io.py 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 class GaussianNoise ( Dataset ): r \"\"\"GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) \"\"\" def __init__ ( self , stddev : float = 0.01 , input_data : Union [ np . 
ndarray , Tensor ] = None ): super ( Dataset , self ) . __init__ () self . stddev = stddev if isinstance ( input_data , np . ndarray ): input_data_ = torch . from_numpy ( input_data . astype ( \"float32\" )) else : input_data_ = input_data self . input_data = input_data_ self . data_shape = tuple ( self . input_data . shape ) def size ( self ): return self . data_shape def __call__ ( self ): return ( 1 + self . stddev * torch . randn ( * self . data_shape )) * self . input_data","title":"GaussianNoise"},{"location":"simulai_io/#tokenizer","text":"Wrapper for multiple tokenization approaches Source code in simulai/io.py 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 class Tokenizer : \"\"\"Wrapper for multiple tokenization approaches\"\"\" def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" ) def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs ) def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs ) def _make_time_input_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None , step : float = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) step (float): Size of the timestep. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized input dataset. \"\"\" dim = num_step src = np . repeat ( np . expand_dims ( src , axis = 1 ), dim , axis = 1 ) src_shape = src . shape src_shape_list = list ( src_shape ) src_shape_list [ - 1 ] += 1 src_final = np . zeros ( tuple ( src_shape_list )) src_final [:, :, : - 1 ] = src for i in range ( num_step ): src_final [:, i , - 1 ] += step * i return src_final [: - num_step + 1 ] def _make_time_target_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized target dataset. \"\"\" moving_window = MovingWindow ( history_size = 1 , skip_size = 1 , horizon_size = num_step - 1 ) input_data , output_data = moving_window ( input_data = src , output_data = src ) return np . 
concatenate ([ input_data , output_data ], axis = 1 )","title":"Tokenizer"},{"location":"simulai_io/#simulai.io.Tokenizer.__init__","text":"Parameters: Name Type Description Default kind str The kind of tokenization to be used. (Default value = \"time_indexer\") 'time_indexer' Source code in simulai/io.py 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" )","title":"__init__()"},{"location":"simulai_io/#simulai.io.Tokenizer.generate_input_tokens","text":"Generating the input sequence of tokens. Source code in simulai/io.py 1810 1811 1812 1813 1814 def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs )","title":"generate_input_tokens()"},{"location":"simulai_io/#simulai.io.Tokenizer.generate_target_tokens","text":"Generating the target sequence of tokens. Source code in simulai/io.py 1816 1817 1818 1819 1820 def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs )","title":"generate_target_tokens()"},{"location":"simulai_parallel/","text":"red { color: red } Parallel # PipelineMPI # PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. Source code in simulai/parallel.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 class PipelineMPI : \"\"\"PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. \"\"\" def __init__ ( self , exec : callable = None , extra_params : dict = None , collect : bool = None , show_log : bool = True , ) -> None : self . exec = exec self . show_log = show_log if extra_params is not None : self . extra_params = extra_params else : self . extra_params = {} self . collect = collect self . comm = MPI . COMM_WORLD self . n_procs = self . comm . Get_size () self . status = ( self . n_procs - 1 ) * [ False ] self . status_dict = dict () def _check_kwargs_consistency ( self , kwargs : dict = None ) -> int : \"\"\"It checks if the kwargs provided for each worker have the same length. 
Args: kwargs (dict, optional): a dictionary containing the kwargs of all (Default value = None) Returns: int: Length of the batch sent for each worker. \"\"\" types = [ type ( value ) for value in kwargs . values ()] lengths = [ len ( value ) for value in kwargs . values ()] assert all ([ t == list for t in types ]), ( f \"All the elements in kwargs must be list,\" f \" but received { types } .\" ) assert len ( set ( lengths )) == 1 , ( f \"All the elements in kwargs must be the same length,\" f \" but received { lengths } \" ) print ( \"kwargs is alright.\" ) return lengths [ 0 ] def _split_kwargs ( self , kwargs : dict , rank : int , size : int , total_size : int ) -> Tuple [ dict , int ]: \"\"\"It allows the workload be executed serially in each worker node Args: kwargs (dict): A dictionary containing kwargs, which will be distributed for all the workers. rank (int): The index of the rank. size (int): The number of available workers. total_size (int): The total number of elements to be distributed among the workers. Returns: kwargs_batch: A dictionary containing the kwargs to be sent for each worker. batch_size: The batch size, which corresponds to the number of elements to be sent for each worker. \"\"\" # Decrement rank and size by 1, because they are usually 0-indexed in Python size -= 1 rank -= 1 # Calculate batch size and remainder using divmod() function batch_size , remainder = divmod ( total_size , size ) # If rank is less than remainder, calculate kwargs_batch using batch size + 1 if rank < remainder : kwargs_batch = { key : value [ rank * ( batch_size + 1 ) : ( rank + 1 ) * ( batch_size + 1 )] for key , value in kwargs . items () } return kwargs_batch , batch_size + 1 # If rank is not less than remainder, calculate kwargs_batch using batch size else : kwargs_batch = { key : value [ remainder * ( batch_size + 1 ) + ( rank - remainder ) * batch_size : ( rank - remainder + 1 ) * batch_size ] for key , value in kwargs . items () } return kwargs_batch , batch_size def _attribute_dict_output ( self , dicts : list = None ) -> None : root = dict () for e in dicts : root . update ( e ) for key , value in root . items (): self . status_dict [ key ] = value @staticmethod def inner_type ( obj : list = None ): types_list = [ type ( o ) for o in obj ] assert len ( set ( types_list )) == 1 , \"Composed types are not supported.\" return types_list [ 0 ] def _exec_wrapper ( self , kwargs : dict , total_size : int ) -> None : \"\"\"A wrapper method around exec to facilitate the instantiation of each worker. Args: kwargs (dict): A dictionary containing kwargs for the worker. total_size (int): The total number of elements. \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () size = comm . Get_size () size_ = size # Rank 0 is the 'master' node # The worker nodes execute their workload and send a message to # master if rank != 0 : print ( f \"Executing rank { rank } .\" ) kwargs_batch , batch_size = self . _split_kwargs ( kwargs , rank , size_ , total_size ) kwargs_batch_list = [ { key : value [ j ] for key , value in kwargs_batch . items ()} for j in range ( batch_size ) ] out = list () for i in kwargs_batch_list : print ( f \"Executing batch { i [ 'key' ] } in rank { rank } \" ) # Concatenate the rank to the extra parameters i . update ( self . extra_params ) # Appending the result of the operation self.exec to the partial list out . append ( self . exec ( ** i )) if self . collect is True : msg = out else : msg = 1 if self . 
show_log : print ( f \"Sending the output { msg } to rank 0\" ) comm . send ( msg , dest = 0 ) print ( f \"Execution concluded for rank { rank } .\" ) # The master awaits the responses of each worker node elif rank == 0 : for r in range ( 1 , size ): msg = comm . recv ( source = r ) self . status [ r - 1 ] = msg if self . inner_type ( msg ) == dict : self . _attribute_dict_output ( dicts = msg ) if self . show_log : print ( f \"Rank 0 received { msg } from rank { r } \" ) comm . barrier () @property def success ( self ) -> bool : \"\"\"It returns True if the entire process worked without issues. \"\"\" return all ( self . status ) def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier () success : bool property # It returns True if the entire process worked without issues. run ( kwargs = None ) # It runs the MPI job Parameters: Name Type Description Default kwargs dict A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) None Source code in simulai/parallel.py 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier ()","title":"Simulai parallel"},{"location":"simulai_parallel/#parallel","text":"","title":"Parallel"},{"location":"simulai_parallel/#pipelinempi","text":"PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. 
Source code in simulai/parallel.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 class PipelineMPI : \"\"\"PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. \"\"\" def __init__ ( self , exec : callable = None , extra_params : dict = None , collect : bool = None , show_log : bool = True , ) -> None : self . exec = exec self . show_log = show_log if extra_params is not None : self . extra_params = extra_params else : self . extra_params = {} self . collect = collect self . comm = MPI . COMM_WORLD self . n_procs = self . comm . Get_size () self . status = ( self . n_procs - 1 ) * [ False ] self . status_dict = dict () def _check_kwargs_consistency ( self , kwargs : dict = None ) -> int : \"\"\"It checks if the kwargs provided for each worker have the same length. Args: kwargs (dict, optional): a dictionary containing the kwargs of all (Default value = None) Returns: int: Length of the batch sent for each worker. \"\"\" types = [ type ( value ) for value in kwargs . values ()] lengths = [ len ( value ) for value in kwargs . values ()] assert all ([ t == list for t in types ]), ( f \"All the elements in kwargs must be list,\" f \" but received { types } .\" ) assert len ( set ( lengths )) == 1 , ( f \"All the elements in kwargs must be the same length,\" f \" but received { lengths } \" ) print ( \"kwargs is alright.\" ) return lengths [ 0 ] def _split_kwargs ( self , kwargs : dict , rank : int , size : int , total_size : int ) -> Tuple [ dict , int ]: \"\"\"It allows the workload be executed serially in each worker node Args: kwargs (dict): A dictionary containing kwargs, which will be distributed for all the workers. rank (int): The index of the rank. size (int): The number of available workers. total_size (int): The total number of elements to be distributed among the workers. Returns: kwargs_batch: A dictionary containing the kwargs to be sent for each worker. batch_size: The batch size, which corresponds to the number of elements to be sent for each worker. \"\"\" # Decrement rank and size by 1, because they are usually 0-indexed in Python size -= 1 rank -= 1 # Calculate batch size and remainder using divmod() function batch_size , remainder = divmod ( total_size , size ) # If rank is less than remainder, calculate kwargs_batch using batch size + 1 if rank < remainder : kwargs_batch = { key : value [ rank * ( batch_size + 1 ) : ( rank + 1 ) * ( batch_size + 1 )] for key , value in kwargs . items () } return kwargs_batch , batch_size + 1 # If rank is not less than remainder, calculate kwargs_batch using batch size else : kwargs_batch = { key : value [ remainder * ( batch_size + 1 ) + ( rank - remainder ) * batch_size : ( rank - remainder + 1 ) * batch_size ] for key , value in kwargs . 
items () } return kwargs_batch , batch_size def _attribute_dict_output ( self , dicts : list = None ) -> None : root = dict () for e in dicts : root . update ( e ) for key , value in root . items (): self . status_dict [ key ] = value @staticmethod def inner_type ( obj : list = None ): types_list = [ type ( o ) for o in obj ] assert len ( set ( types_list )) == 1 , \"Composed types are not supported.\" return types_list [ 0 ] def _exec_wrapper ( self , kwargs : dict , total_size : int ) -> None : \"\"\"A wrapper method around exec to facilitate the instantiation of each worker. Args: kwargs (dict): A dictionary containing kwargs for the worker. total_size (int): The total number of elements. \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () size = comm . Get_size () size_ = size # Rank 0 is the 'master' node # The worker nodes execute their workload and send a message to # master if rank != 0 : print ( f \"Executing rank { rank } .\" ) kwargs_batch , batch_size = self . _split_kwargs ( kwargs , rank , size_ , total_size ) kwargs_batch_list = [ { key : value [ j ] for key , value in kwargs_batch . items ()} for j in range ( batch_size ) ] out = list () for i in kwargs_batch_list : print ( f \"Executing batch { i [ 'key' ] } in rank { rank } \" ) # Concatenate the rank to the extra parameters i . update ( self . extra_params ) # Appending the result of the operation self.exec to the partial list out . append ( self . exec ( ** i )) if self . collect is True : msg = out else : msg = 1 if self . show_log : print ( f \"Sending the output { msg } to rank 0\" ) comm . send ( msg , dest = 0 ) print ( f \"Execution concluded for rank { rank } .\" ) # The master awaits the responses of each worker node elif rank == 0 : for r in range ( 1 , size ): msg = comm . recv ( source = r ) self . status [ r - 1 ] = msg if self . inner_type ( msg ) == dict : self . _attribute_dict_output ( dicts = msg ) if self . show_log : print ( f \"Rank 0 received { msg } from rank { r } \" ) comm . barrier () @property def success ( self ) -> bool : \"\"\"It returns True if the entire process worked without issues. \"\"\" return all ( self . status ) def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier ()","title":"PipelineMPI"},{"location":"simulai_parallel/#simulai.parallel.PipelineMPI.success","text":"It returns True if the entire process worked without issues.","title":"success"},{"location":"simulai_parallel/#simulai.parallel.PipelineMPI.run","text":"It runs the MPI job Parameters: Name Type Description Default kwargs dict A kwargs dictionary containing chunks of input arguments to be sent for each worker. 
(Default value = None) None Source code in simulai/parallel.py 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier ()","title":"run()"},{"location":"simulai_residuals/","text":"red { color: red } simulai.residuals # Bases: Module The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: Name Type Description object An instance of the SymbolicOperatorClass. Source code in simulai/residuals/_pytorch_residuals.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 class SymbolicOperator ( torch . nn . 
Module ): \"\"\"The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: object: An instance of the SymbolicOperatorClass. \"\"\" def __init__ ( self , expressions : List [ Union [ sympy . Expr , str ]] = None , input_vars : List [ Union [ sympy . Symbol , str ]] = None , output_vars : List [ Union [ sympy . Symbol , str ]] = None , function : callable = None , gradient : callable = None , keys : str = None , inputs_key = None , constants : dict = None , trainable_parameters : dict = None , external_functions : dict = dict (), processing : str = \"serial\" , device : str = \"cpu\" , engine : str = \"torch\" , auxiliary_expressions : list = None , ) -> None : if engine == \"torch\" : super ( SymbolicOperator , self ) . __init__ () else : pass self . engine = importlib . import_module ( engine ) self . constants = constants if trainable_parameters is not None : self . trainable_parameters = trainable_parameters else : self . trainable_parameters = dict () self . external_functions = external_functions self . processing = processing self . periodic_bc_protected_key = \"periodic\" self . protected_funcs = [ \"cos\" , \"sin\" , \"sqrt\" , \"exp\" ] self . protected_operators = [ \"L\" , \"Div\" , \"Identity\" , \"Kronecker\" ] self . protected_funcs_subs = self . _construct_protected_functions () self . protected_operators_subs = self . _construct_implict_operators () # Configuring the device to be used during the fitting process if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" else : device = \"cuda\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . expressions = [ self . _parse_expression ( expr = expr ) for expr in expressions ] if isinstance ( auxiliary_expressions , dict ): self . auxiliary_expressions = { key : self . _parse_expression ( expr = expr ) for key , expr in auxiliary_expressions . items () } else : self . auxiliary_expressions = auxiliary_expressions self . input_vars = [ self . _parse_variable ( var = var ) for var in input_vars ] self . output_vars = [ self . _parse_variable ( var = var ) for var in output_vars ] self . input_names = [ var . name for var in self . input_vars ] self . output_names = [ var . name for var in self . output_vars ] self . keys = keys if inputs_key != None : self . inputs_key = self . _parse_inputs_key ( inputs_key = inputs_key ) else : self . inputs_key = inputs_key self . all_vars = self . input_vars + self . output_vars if self . inputs_key is not None : self . forward = self . _forward_dict else : self . forward = self . _forward_tensor self . function = function self . diff_symbol = D self . output = None self . f_expressions = list () self . g_expressions = dict () self . feed_vars = None for name in self . output_names : setattr ( self , name , None ) # Defining functions for returning each variable of the regression # function for index , name in enumerate ( self . output_names ): setattr ( self , name , lambda data : self . function . forward ( input_data = data )[ ... , index ][ ... , None ], ) # If no external gradient is provided, use the core gradient evaluator if gradient is None : gradient_function = self . gradient else : gradient_function = gradient subs = { self . diff_symbol . name : gradient_function } subs . 
update ( self . external_functions ) subs . update ( self . protected_funcs_subs ) for expr in self . expressions : if not callable ( expr ): f_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : f_expr = expr self . f_expressions . append ( f_expr ) if self . auxiliary_expressions is not None : for key , expr in self . auxiliary_expressions . items (): if not callable ( expr ): g_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : g_expr = expr self . g_expressions [ key ] = g_expr # Method for executing the expressions evaluation if self . processing == \"serial\" : self . process_expression = self . _process_expression_serial else : raise Exception ( f \"Processing case { self . processing } not supported.\" ) def _construct_protected_functions ( self ): \"\"\"This function creates a dictionary of protected functions from the engine object attribute. Returns: dict: A dictionary of function names and their corresponding function objects. \"\"\" protected_funcs = { func : getattr ( self . engine , func ) for func in self . protected_funcs } return protected_funcs def _construct_implict_operators ( self ): \"\"\"This function creates a dictionary of protected operators from the operators engine module. Returns: dict: A dictionary of operator names and their corresponding function objects. \"\"\" operators_engine = importlib . import_module ( \"simulai.tokens\" ) protected_operators = { func : getattr ( operators_engine , func ) for func in self . protected_operators } return protected_operators def _parse_key_interval ( self , intv : str ) -> List : begin , end = intv . split ( \",\" ) end = int ( end [: - 1 ]) begin = int ( begin ) end = int ( end + 1 ) return np . arange ( begin , end ) . astype ( int ) . tolist () def _parse_inputs_key ( self , inputs_key : str = None ) -> dict : # Sentences separator: '|' sep = \"|\" # Index identifier: ':' inx = \":\" # Interval identifier intv = \"[\" # Removing possible spaces in the inputs_key string inputs_key = inputs_key . replace ( \" \" , \"\" ) try : split_components = inputs_key . split ( sep ) except ValueError : split_components = inputs_key keys_dict = dict () for s in split_components : try : if len ( s . split ( inx )) > 1 : key , index = s . split ( inx ) if not key in keys_dict : keys_dict [ key ] = list () keys_dict [ key ] . append ( int ( index )) else : keys_dict [ key ] . append ( int ( index )) elif len ( s . split ( intv )) > 1 : key , interval_str = s . split ( intv ) interval = self . _parse_key_interval ( interval_str ) keys_dict [ key ] = interval else : raise ValueError except ValueError : keys_dict [ s ] = - 1 return keys_dict def _collect_data_from_inputs_list ( self , inputs_list : dict = None ) -> list : data = list () for k , v in self . inputs_key . items (): if v == - 1 : if inputs_list [ k ] . shape [ 1 ] == 1 : data_ = [ inputs_list [ k ]] else : data_ = list ( torch . split ( inputs_list [ k ], 1 , dim = 1 )) else : data_ = [ inputs_list [ k ][:, i : i + 1 ] for i in v ] data += data_ return data def _parse_expression ( self , expr = Union [ sympy . Expr , str ]) -> sympy . Expr : \"\"\"Parses the input expression and returns a SymPy expression. Args: expr (Union[sympy.Expr, str], optional, optional): The expression to parse, by default None. It can either be a SymPy expression or a string. Returns: sympy.Expr: The parsed SymPy expression. Raises: Exception: If the `constants` attribute is not defined, and the input expression is a string. 
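To make the expression-parsing path concrete, here is a hedged construction sketch. The network `TinyNet`, the symbol names and the numeric values are illustrative assumptions, and the differential token `D(u, t)` is assumed to be resolved to the internal gradient evaluator through the substitution dictionary built during initialization.

```python
# Hedged sketch: a string residual for du/dt - alpha*u = 0, with `alpha`
# substituted from the `constants` dict during parsing. `TinyNet` is a
# hypothetical stand-in for a SimulAI network exposing forward(input_data=...).
import numpy as np
import torch

from simulai.residuals import SymbolicOperator


class TinyNet(torch.nn.Module):
    def __init__(self):
        super().__init__()
        self.layer = torch.nn.Linear(1, 1)

    def forward(self, input_data=None):
        # Accept the list of per-variable tensors produced internally.
        if isinstance(input_data, list):
            input_data = torch.cat(input_data, dim=-1)
        return self.layer(input_data)


residual = SymbolicOperator(
    expressions=["D(u, t) - alpha*u"],  # string expressions are sympified
    input_vars=["t"],
    output_vars=["u"],
    constants={"alpha": -0.5},          # substituted into the parsed expression
    function=TinyNet(),
    engine="torch",
)

# Evaluating the residual over a (hypothetical) time grid returns one tensor
# per expression, ready to be used as a physics-informed loss term.
t_grid = np.linspace(0.0, 1.0, 16)[:, None]
res, = residual(inputs_data=t_grid)
```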
\"\"\" if isinstance ( expr , str ): try : expr_ = sympify ( expr , locals = self . protected_operators_subs , evaluate = False ) if self . constants is not None : expr_ = expr_ . subs ( self . constants ) if self . trainable_parameters is not None : expr_ = expr_ . subs ( self . trainable_parameters ) except ValueError : if self . constants is not None : _expr = expr for key , value in self . constants . items (): _expr = _expr . replace ( key , str ( value )) expr_ = parse_expr ( _expr , evaluate = 0 ) else : raise Exception ( \"It is necessary to define a constants dict.\" ) elif callable ( expr ): expr_ = expr else : if self . constants is not None : expr_ = expr . subs ( self . constants ) else : expr_ = expr return expr_ def _parse_variable ( self , var = Union [ sympy . Symbol , str ]) -> sympy . Symbol : \"\"\"Parse the input variable and return a SymPy Symbol. Args: var (Union[sympy.Symbol, str], optional, optional): The input variable, either a SymPy Symbol or a string. (Default value = Union[sympy.Symbol, str]) Returns: sympy.Symbol: A SymPy Symbol representing the input variable. \"\"\" if isinstance ( var , str ): return sympy . Symbol ( var ) else : return var def _forward_tensor ( self , input_data : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward the input tensor through the function. Args: input_data (torch.Tensor, optional): The input tensor. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( input_data = input_data ) def _forward_dict ( self , input_data : dict = None ) -> torch . Tensor : \"\"\"Forward the input dictionary through the function. Args: input_data (dict, optional): The input dictionary. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( ** input_data ) def _process_expression_serial ( self , feed_vars : dict = None ) -> List [ torch . Tensor ]: \"\"\"Process the expression list serially using the given feed variables. Args: feed_vars (dict, optional): The feed variables. (Default value = None) Returns: List[torch.Tensor]: A list of tensors after evaluating the expressions serially. \"\"\" return [ f ( ** feed_vars ) . to ( self . device ) for f in self . f_expressions ] def _process_expression_individual ( self , index : int = None , feed_vars : dict = None ) -> torch . Tensor : \"\"\"Evaluates a single expression specified by index from the f_expressions list with given feed variables. Args: index (int, optional): Index of the expression to be evaluated, by default None feed_vars (dict, optional): Dictionary of feed variables, by default None Returns: torch.Tensor: Result of evaluating the specified expression with given feed variables \"\"\" return self . f_expressions [ index ]( ** feed_vars ) . to ( self . device ) def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . 
forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars ) def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars ) @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ] def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs ) __call__ ( inputs_data = None ) # Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Parameters: Name Type Description Default inputs_data Union [ ndarray , dict ] Union (Default value = None) None Returns: Name Type Description List [ Tensor ] List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. 
Raises List [ Tensor ] Raises: Type Description does not match with the inputs_key attribute Source code in simulai/residuals/_pytorch_residuals.py 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars ) eval_expression ( key , inputs_list ) # This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. 
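As an illustration, a hedged continuation of the construction sketch given earlier (reusing `TinyNet`, `numpy` and the same residual string, with a hypothetical auxiliary key `'u_init'`) shows how an expression registered through `auxiliary_expressions` can be evaluated on initial or boundary points with `eval_expression`:

```python
# Hedged continuation of the earlier sketch: register an auxiliary expression
# for a hypothetical initial condition u(0) = 1 and evaluate it separately.
residual_ic = SymbolicOperator(
    expressions=["D(u, t) - alpha*u"],
    auxiliary_expressions={"u_init": "u - 1.0"},  # key used to look up g_expressions
    input_vars=["t"],
    output_vars=["u"],
    constants={"alpha": -0.5},
    function=TinyNet(),
    engine="torch",
)

t0 = np.zeros((1, 1))  # initial/boundary points as an array
ic_res = residual_ic.eval_expression("u_init", t0)
```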
Parameters: Name Type Description Default key str the key used to retrieve the expression from the 'g_expressions' attribute required inputs_list list either a list of arrays, an np.ndarray, or a dict containing the inputs to the function required Returns: Type Description the result of evaluating the expression using the inputs.: Source code in simulai/residuals/_pytorch_residuals.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars ) gradient ( feature , param ) staticmethod # Calculates the gradient of the given feature with respect to the given parameter. Parameters: Name Type Description Default feature Tensor Tensor with the input feature. required param Tensor Tensor with the parameter to calculate the gradient with respect to. required Returns: Type Description torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ] jac ( inputs ) # Calculates the Jacobian of the forward function of the model with respect to its inputs. Parameters: Name Type Description Default inputs Tensor Tensor with the input data to the forward function. required Returns: Type Description torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. 
Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs )","title":"Simulai residuals"},{"location":"simulai_residuals/#simulairesiduals","text":"Bases: Module The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: Name Type Description object An instance of the SymbolicOperatorClass. Source code in simulai/residuals/_pytorch_residuals.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 class SymbolicOperator ( torch . nn . 
Module ): \"\"\"The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: object: An instance of the SymbolicOperatorClass. \"\"\" def __init__ ( self , expressions : List [ Union [ sympy . Expr , str ]] = None , input_vars : List [ Union [ sympy . Symbol , str ]] = None , output_vars : List [ Union [ sympy . Symbol , str ]] = None , function : callable = None , gradient : callable = None , keys : str = None , inputs_key = None , constants : dict = None , trainable_parameters : dict = None , external_functions : dict = dict (), processing : str = \"serial\" , device : str = \"cpu\" , engine : str = \"torch\" , auxiliary_expressions : list = None , ) -> None : if engine == \"torch\" : super ( SymbolicOperator , self ) . __init__ () else : pass self . engine = importlib . import_module ( engine ) self . constants = constants if trainable_parameters is not None : self . trainable_parameters = trainable_parameters else : self . trainable_parameters = dict () self . external_functions = external_functions self . processing = processing self . periodic_bc_protected_key = \"periodic\" self . protected_funcs = [ \"cos\" , \"sin\" , \"sqrt\" , \"exp\" ] self . protected_operators = [ \"L\" , \"Div\" , \"Identity\" , \"Kronecker\" ] self . protected_funcs_subs = self . _construct_protected_functions () self . protected_operators_subs = self . _construct_implict_operators () # Configuring the device to be used during the fitting process if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" else : device = \"cuda\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . expressions = [ self . _parse_expression ( expr = expr ) for expr in expressions ] if isinstance ( auxiliary_expressions , dict ): self . auxiliary_expressions = { key : self . _parse_expression ( expr = expr ) for key , expr in auxiliary_expressions . items () } else : self . auxiliary_expressions = auxiliary_expressions self . input_vars = [ self . _parse_variable ( var = var ) for var in input_vars ] self . output_vars = [ self . _parse_variable ( var = var ) for var in output_vars ] self . input_names = [ var . name for var in self . input_vars ] self . output_names = [ var . name for var in self . output_vars ] self . keys = keys if inputs_key != None : self . inputs_key = self . _parse_inputs_key ( inputs_key = inputs_key ) else : self . inputs_key = inputs_key self . all_vars = self . input_vars + self . output_vars if self . inputs_key is not None : self . forward = self . _forward_dict else : self . forward = self . _forward_tensor self . function = function self . diff_symbol = D self . output = None self . f_expressions = list () self . g_expressions = dict () self . feed_vars = None for name in self . output_names : setattr ( self , name , None ) # Defining functions for returning each variable of the regression # function for index , name in enumerate ( self . output_names ): setattr ( self , name , lambda data : self . function . forward ( input_data = data )[ ... , index ][ ... , None ], ) # If no external gradient is provided, use the core gradient evaluator if gradient is None : gradient_function = self . gradient else : gradient_function = gradient subs = { self . diff_symbol . name : gradient_function } subs . 
update ( self . external_functions ) subs . update ( self . protected_funcs_subs ) for expr in self . expressions : if not callable ( expr ): f_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : f_expr = expr self . f_expressions . append ( f_expr ) if self . auxiliary_expressions is not None : for key , expr in self . auxiliary_expressions . items (): if not callable ( expr ): g_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : g_expr = expr self . g_expressions [ key ] = g_expr # Method for executing the expressions evaluation if self . processing == \"serial\" : self . process_expression = self . _process_expression_serial else : raise Exception ( f \"Processing case { self . processing } not supported.\" ) def _construct_protected_functions ( self ): \"\"\"This function creates a dictionary of protected functions from the engine object attribute. Returns: dict: A dictionary of function names and their corresponding function objects. \"\"\" protected_funcs = { func : getattr ( self . engine , func ) for func in self . protected_funcs } return protected_funcs def _construct_implict_operators ( self ): \"\"\"This function creates a dictionary of protected operators from the operators engine module. Returns: dict: A dictionary of operator names and their corresponding function objects. \"\"\" operators_engine = importlib . import_module ( \"simulai.tokens\" ) protected_operators = { func : getattr ( operators_engine , func ) for func in self . protected_operators } return protected_operators def _parse_key_interval ( self , intv : str ) -> List : begin , end = intv . split ( \",\" ) end = int ( end [: - 1 ]) begin = int ( begin ) end = int ( end + 1 ) return np . arange ( begin , end ) . astype ( int ) . tolist () def _parse_inputs_key ( self , inputs_key : str = None ) -> dict : # Sentences separator: '|' sep = \"|\" # Index identifier: ':' inx = \":\" # Interval identifier intv = \"[\" # Removing possible spaces in the inputs_key string inputs_key = inputs_key . replace ( \" \" , \"\" ) try : split_components = inputs_key . split ( sep ) except ValueError : split_components = inputs_key keys_dict = dict () for s in split_components : try : if len ( s . split ( inx )) > 1 : key , index = s . split ( inx ) if not key in keys_dict : keys_dict [ key ] = list () keys_dict [ key ] . append ( int ( index )) else : keys_dict [ key ] . append ( int ( index )) elif len ( s . split ( intv )) > 1 : key , interval_str = s . split ( intv ) interval = self . _parse_key_interval ( interval_str ) keys_dict [ key ] = interval else : raise ValueError except ValueError : keys_dict [ s ] = - 1 return keys_dict def _collect_data_from_inputs_list ( self , inputs_list : dict = None ) -> list : data = list () for k , v in self . inputs_key . items (): if v == - 1 : if inputs_list [ k ] . shape [ 1 ] == 1 : data_ = [ inputs_list [ k ]] else : data_ = list ( torch . split ( inputs_list [ k ], 1 , dim = 1 )) else : data_ = [ inputs_list [ k ][:, i : i + 1 ] for i in v ] data += data_ return data def _parse_expression ( self , expr = Union [ sympy . Expr , str ]) -> sympy . Expr : \"\"\"Parses the input expression and returns a SymPy expression. Args: expr (Union[sympy.Expr, str], optional, optional): The expression to parse, by default None. It can either be a SymPy expression or a string. Returns: sympy.Expr: The parsed SymPy expression. Raises: Exception: If the `constants` attribute is not defined, and the input expression is a string. 
\"\"\" if isinstance ( expr , str ): try : expr_ = sympify ( expr , locals = self . protected_operators_subs , evaluate = False ) if self . constants is not None : expr_ = expr_ . subs ( self . constants ) if self . trainable_parameters is not None : expr_ = expr_ . subs ( self . trainable_parameters ) except ValueError : if self . constants is not None : _expr = expr for key , value in self . constants . items (): _expr = _expr . replace ( key , str ( value )) expr_ = parse_expr ( _expr , evaluate = 0 ) else : raise Exception ( \"It is necessary to define a constants dict.\" ) elif callable ( expr ): expr_ = expr else : if self . constants is not None : expr_ = expr . subs ( self . constants ) else : expr_ = expr return expr_ def _parse_variable ( self , var = Union [ sympy . Symbol , str ]) -> sympy . Symbol : \"\"\"Parse the input variable and return a SymPy Symbol. Args: var (Union[sympy.Symbol, str], optional, optional): The input variable, either a SymPy Symbol or a string. (Default value = Union[sympy.Symbol, str]) Returns: sympy.Symbol: A SymPy Symbol representing the input variable. \"\"\" if isinstance ( var , str ): return sympy . Symbol ( var ) else : return var def _forward_tensor ( self , input_data : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward the input tensor through the function. Args: input_data (torch.Tensor, optional): The input tensor. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( input_data = input_data ) def _forward_dict ( self , input_data : dict = None ) -> torch . Tensor : \"\"\"Forward the input dictionary through the function. Args: input_data (dict, optional): The input dictionary. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( ** input_data ) def _process_expression_serial ( self , feed_vars : dict = None ) -> List [ torch . Tensor ]: \"\"\"Process the expression list serially using the given feed variables. Args: feed_vars (dict, optional): The feed variables. (Default value = None) Returns: List[torch.Tensor]: A list of tensors after evaluating the expressions serially. \"\"\" return [ f ( ** feed_vars ) . to ( self . device ) for f in self . f_expressions ] def _process_expression_individual ( self , index : int = None , feed_vars : dict = None ) -> torch . Tensor : \"\"\"Evaluates a single expression specified by index from the f_expressions list with given feed variables. Args: index (int, optional): Index of the expression to be evaluated, by default None feed_vars (dict, optional): Dictionary of feed variables, by default None Returns: torch.Tensor: Result of evaluating the specified expression with given feed variables \"\"\" return self . f_expressions [ index ]( ** feed_vars ) . to ( self . device ) def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . 
forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars ) def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars ) @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ] def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs )","title":"simulai.residuals"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.__call__","text":"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Parameters: Name Type Description Default inputs_data Union [ ndarray , dict ] Union (Default value = None) None Returns: Name Type Description List [ Tensor ] List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. 
Raises List [ Tensor ] Raises: Type Description does not match with the inputs_key attribute Source code in simulai/residuals/_pytorch_residuals.py 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars )","title":"__call__()"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.eval_expression","text":"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. 
Parameters: Name Type Description Default key str the key used to retrieve the expression from the 'g_expressions' attribute required inputs_list list either a list of arrays, an np.ndarray, or a dict containing the inputs to the function required Returns: Type Description the result of evaluating the expression using the inputs.: Source code in simulai/residuals/_pytorch_residuals.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars )","title":"eval_expression()"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.gradient","text":"Calculates the gradient of the given feature with respect to the given parameter. Parameters: Name Type Description Default feature Tensor Tensor with the input feature. required param Tensor Tensor with the parameter to calculate the gradient with respect to. required Returns: Type Description torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ]","title":"gradient()"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.jac","text":"Calculates the Jacobian of the forward function of the model with respect to its inputs. Parameters: Name Type Description Default inputs Tensor Tensor with the input data to the forward function. required Returns: Type Description torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. 
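The `gradient()` helper documented above is a thin wrapper around `torch.autograd.grad`. The following standalone sketch (illustrative network and variable names, not SimulAI code) reproduces that pattern and shows how it can be chained to obtain higher-order derivatives, which is the typical use in physics-informed residuals:

```python
import torch
from torch.autograd import grad

def gradient(feature, param):
    # Same pattern as SymbolicOperator.gradient shown above
    grad_ = grad(
        feature, param, grad_outputs=torch.ones_like(feature),
        create_graph=True, allow_unused=True, retain_graph=True,
    )
    return grad_[0]

# Toy setting: u = u(t) given by a small network
net = torch.nn.Sequential(torch.nn.Linear(1, 8), torch.nn.Tanh(), torch.nn.Linear(8, 1))
t = torch.linspace(0.0, 1.0, 20).reshape(-1, 1).requires_grad_(True)
u = net(t)

u_t = gradient(u, t)     # first derivative du/dt
u_tt = gradient(u_t, t)  # second derivative, possible because create_graph=True
print(u_t.shape, u_tt.shape)
```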
Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs )","title":"jac()"},{"location":"simulai_models/simulai_models_autoencoder/","text":"red { color: red } AutoEncoder # AutoencoderMLP # Bases: NetworkTemplate This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: Fully-connected encoder Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 class AutoencoderMLP ( NetworkTemplate ): r \"\"\"This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: - Fully-connected encoder - Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER \"\"\" def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . 
weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict () def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary () def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . numpy () __init__ ( encoder = None , decoder = None , input_dim = None , output_dim = None , latent_dim = None , activation = None , shallow = False , devices = 'cpu' , name = None ) # Initialize the AutoencoderMLP network Parameters: Name Type Description Default encoder DenseNetwork The encoder network architecture. (Default value = None) None decoder DenseNetwork The decoder network architecture. (Default value = None) None input_dim Optional [ int ] The input dimensions of the data, by default None. None output_dim Optional [ int ] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. 
None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default \"cpu\". 'cpu' name str The name of the network, by default None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict () eval_projection ( input_data = None ) # Evaluate the projection of the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 168 169 170 171 172 173 174 175 176 177 178 179 180 def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. 
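Based on the constructor arguments and methods documented above, a typical use of the automatic-generation path (no encoder/decoder passed, so default architectures are built from the dimensions) might look like the sketch below. The import path and the activation value "tanh" are assumptions made only for illustration; the parameter names are the ones listed in the documentation.

```python
import numpy as np
from simulai.models import AutoencoderMLP  # import path assumed for this sketch

# Let the class build default encoder/decoder networks from the given dimensions
autoencoder = AutoencoderMLP(
    input_dim=64,
    latent_dim=8,
    output_dim=64,
    activation="tanh",   # assumed to be a valid activation identifier
    shallow=True,
    devices="cpu",
)

data = np.random.rand(100, 64).astype("float32")

reconstructed = autoencoder.forward(input_data=data)   # projection + reconstruction
latent = autoencoder.eval_projection(input_data=data)  # latent coordinates as np.ndarray

print(reconstructed.shape, latent.shape)  # expected: (100, 64) and (100, 8)
```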
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . numpy () forward ( input_data = None ) # Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed projection ( input_data = None ) # Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent reconstruction ( input_data = None ) # Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed summary () # Prints the summary of the network architecture Source code in simulai/models/_pytorch_models/_autoencoder.py 114 115 116 117 def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary () AutoencoderCNN # Bases: NetworkTemplate This is an implementation of a convolutional autoencoder as Reduced Order Model. An autoencoder architecture consists of three stages: The convolutional encoder The bottleneck stage, subdivided in: Fully-connected encoder Fully connected decoder The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... 
[Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 class AutoencoderCNN ( NetworkTemplate ): r \"\"\"This is an implementation of a convolutional autoencoder as Reduced Order Model. An autoencoder architecture consists of three stages: - The convolutional encoder - The bottleneck stage, subdivided in: - Fully-connected encoder - Fully connected decoder - The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. 
channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . 
bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data ) def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . 
numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () __init__ ( encoder = None , bottleneck_encoder = None , bottleneck_decoder = None , decoder = None , encoder_activation = 'relu' , input_dim = None , output_dim = None , latent_dim = None , kernel_size = None , activation = None , channels = None , case = None , shallow = False , devices = 'cpu' , name = None , ** kwargs ) # Initialize the AutoencoderCNN network. Parameters: Name Type Description Default encoder ConvolutionalNetwork The encoder network architecture, by default None. None bottleneck_encoder Linear The bottleneck encoder network architecture, by default None. None bottleneck_decoder Linear The bottleneck decoder network architecture, by default None. None decoder ConvolutionalNetwork The decoder network architecture, by default None. None encoder_activation str The activation function used by the encoder network, by default 'relu'. 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimensions of the data, by default None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. None kernel_size Optional [ int ] (Default value = None) None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None channels Optional [ int ] The number of channels of the convolutional layers, by default None. None case Optional [ str ] The type of convolutional encoder and decoder to be used, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default 'cpu'. 'cpu' name str The name of the network, by default None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. 
Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () eval ( input_data = None ) # Evaluate the autoencoder on the given dataset. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be evaluated, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. 
Source code in simulai/models/_pytorch_models/_autoencoder.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data ) forward ( input_data ) # Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed project ( input_data = None ) # Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 461 462 463 464 465 466 467 468 469 470 471 472 473 474 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () projection ( input_data ) # Project input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected. required Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent reconstruct ( input_data = None ) # Reconstructs the latent dataset to the original one. 
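The `project()` and `reconstruct()` methods above give a convenient numpy-in/numpy-out round trip through the latent space. A sketch of the automatic-generation path, using only the constructor parameters listed in the documentation (the import path and the concrete values chosen for `case`, `channels`, `kernel_size` and the input layout are assumptions), could look like:

```python
import numpy as np
from simulai.models import AutoencoderCNN  # import path assumed for this sketch

autoencoder = AutoencoderCNN(
    input_dim=(None, 1, 32, 32),   # assumed layout: (batch, channels, height, width)
    latent_dim=16,
    output_dim=(None, 1, 32, 32),
    kernel_size=3,
    activation="relu",
    channels=4,
    case="2d",                     # assumed identifier for 2D convolutional stages
    shallow=True,
    devices="cpu",
)

snapshots = np.random.rand(10, 1, 32, 32).astype("float32")

latent = autoencoder.project(input_data=snapshots)     # encoder + bottleneck encoder
restored = autoencoder.reconstruct(input_data=latent)  # bottleneck decoder + decoder

print(latent.shape, restored.shape)
```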
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be reconstructed. If not provided, uses the original input data, by default None. None Returns: Type Description ndarray np.ndarray: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 476 477 478 479 480 481 482 483 484 485 486 487 488 489 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () reconstruction ( input_data ) # Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed summary ( input_data = None , input_shape = None , verbose = True ) # Prints the summary of the network architecture. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. (Default value = None) None input_shape list The shape of the input data. (Default value = None) None verbose bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . 
forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) AutoencoderKoopman # Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Koopman operator Fully connected decoder The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... 
[Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 class AutoencoderKoopman ( NetworkTemplate ): r \"\"\"This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Koopman operator - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. 
bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . 
add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : if verbose == True : if self . input_dim != None : input_shape = list ( self . input_dim ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () print ( f \"The Koopman Operator has shape: { self . K_op . shape } \" ) self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the bottleneck encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . 
prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the bottleneck decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m )) def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T ) def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . 
reconstruction ( input_data = latent_m ) return reconstructed_m def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy () def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () __init__ ( encoder = None , bottleneck_encoder = None , bottleneck_decoder = None , decoder = None , input_dim = None , output_dim = None , latent_dim = None , activation = None , channels = None , case = None , architecture = None , shallow = False , use_batch_norm = False , encoder_activation = 'relu' , devices = 'cpu' , name = None ) # Constructs a new instance of the Autoencoder Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None input_dim Optional [ Tuple [ int , ...]] The input dimensions. Used for automatic network generation. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions. Used for automatic network generation. Defaults to None. None latent_dim Optional [ int ] The latent dimensions. Used for automatic network generation. Defaults to None. None activation Optional [ Union [ list , str ]] The activation functions for each layer. Used for automatic network generation. Defaults to None. None channels Optional [ int ] The number of channels. Used for automatic network generation. Defaults to None. None case Optional [ str ] The type of problem. Used for automatic network generation. Defaults to None. 
None architecture Optional [ str ] The network architecture. Used for automatic network generation. Defaults to None. None shallow Optional [ bool ] Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. False use_batch_norm Optional [ bool ] (Default value = False) False encoder_activation str The activation function for the encoder. Defaults to \"relu\". 'relu' devices Union [ str , list ] The devices to use. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. 
use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () latent_forward ( input_data = None ) # Evaluates the operation u^{u+1} = K u^{i} Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. 
None Returns: Type Description Tensor torch.Tensor: The computed latent representation. Source code in simulai/models/_pytorch_models/_autoencoder.py 820 821 822 823 824 825 826 827 828 829 830 831 832 def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T ) latent_forward_m ( input_data = None , m = 1 ) # Evaluates the operation $u^{u+m} = K^m u^{i}$ Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The computed latent representation. Source code in simulai/models/_pytorch_models/_autoencoder.py 805 806 807 808 809 810 811 812 813 814 815 816 817 818 def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m )) predict ( input_data = None , n_steps = 1 ) # Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None n_steps int The number of extrapolations to perform. Defaults to 1. 1 Returns: Type Description ndarray np.ndarray: The predicted reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy () project ( input_data = None ) # Projects the input data into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The projected data. 
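Taken together, the projection, Koopman latent forwarding, and reconstruction methods above compose the full reduced-order workflow: encode a snapshot, advance it in the latent space with the operator K, and decode it back. As a minimal usage sketch (the import path, constructor arguments, and array shapes below are illustrative assumptions, not values taken from the SimulAI examples):

>>> import numpy as np
>>> from simulai.models import AutoencoderKoopman  # assumed public import path
>>> # Hypothetical dense configuration built through the automatic-generation pipeline
>>> autoencoder = AutoencoderKoopman(input_dim=(None, 64), latent_dim=8, output_dim=(None, 64), activation="tanh", architecture="dense", case="dense", shallow=True, devices="cpu")
>>> data = np.random.rand(100, 64)  # synthetic snapshots: 100 time steps of a 64-dimensional state
>>> latent = autoencoder.project(input_data=data)  # E(U), returned as a NumPy array
>>> reconstructed = autoencoder.reconstruction_forward(input_data=data)  # Ũ = D(E(U))
>>> extrapolated = autoencoder.predict(input_data=data[-1:], n_steps=10)  # roll K ten steps ahead in the latent space and decode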
Source code in simulai/models/_pytorch_models/_autoencoder.py 902 903 904 905 906 907 908 909 910 911 912 913 914 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () reconstruct ( input_data = None ) # Reconstructs the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () reconstruction_forward ( input_data = None ) # Evaluates the operation \u0168 = D(E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed reconstruction_forward_m ( input_data = None , m = 1 ) # Evaluates the operation \u0168_m = D(K^m E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . reconstruction ( input_data = latent_m ) return reconstructed_m AutoencoderVariational # Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a reduced order model. 
A variational autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Gaussian noise Fully connected decoder The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 class AutoencoderVariational ( NetworkTemplate ): r \"\"\"This is an implementation of a 
Koopman autoencoder as a reduced order model. A variational autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Gaussian noise - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. **kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. 
if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. 
Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder and bottleneck encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the bottleneck encoder applied to the input data. Note: This function is used for projection of the input data into the bottleneck space. 
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection_with_bottleneck(input_data=input_data) \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the encoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection(input_data=input_data) \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the bottleneck decoder and decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the bottleneck decoder and decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the bottleneck decoder's output. Note: This function is used for reconstruction of the input data from the bottleneck space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> bottleneck_output = autoencoder._projection_with_bottleneck(input_data=input_data) >>> output_data = autoencoder._reconstruction_with_bottleneck(input_data=bottleneck_output) \"\"\" bottleneck_output = self . encoder_activation ( ( self . bottleneck_decoder . forward ( input_data = input_data )) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._reconstruction(input_data=input_data) \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. 
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent ) def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . exp ( self . z_log_var ( latent ) / 2 ) def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . numpy () else : return covariance def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . 
Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. 
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy () CoVariance ( input_data = None , inv = False , to_numpy = False ) # Computes the covariance matrix of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the covariance matrix, by default None None inv bool If True, returns the inverse of the covariance matrix, by default False False to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . numpy () else : return covariance Mu ( input_data = None , to_numpy = False ) # Computes the mean of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the mean, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. 
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent ) Sigma ( input_data = None , to_numpy = False ) # Computes the standard deviation of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the standard deviation, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . exp ( self . z_log_var ( latent ) / 2 ) __init__ ( encoder = None , bottleneck_encoder = None , bottleneck_decoder = None , decoder = None , encoder_activation = 'relu' , input_dim = None , output_dim = None , latent_dim = None , activation = None , channels = None , kernel_size = None , case = None , architecture = None , use_batch_norm = False , shallow = False , scale = 0.001 , devices = 'cpu' , name = None , ** kwargs ) # Constructor method. Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. 
None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None encoder_activation str The activation function to use in the encoder. Defaults to \"relu\". 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimension of the data. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimension of the data. Defaults to None. None latent_dim Optional [ int ] The size of the bottleneck layer. Defaults to None. None activation Optional [ Union [ list , str ]] The activation function to use in the networks. Defaults to None. None channels Optional [ int ] The number of channels in the input data. Defaults to None. None kernel_size Optional [ int ] Convolutional kernel size. (Default value = None) None case Optional [ str ] The name of the autoencoder variant. Defaults to None. None architecture Optional [ str ] The architecture of the networks. Defaults to None. None use_batch_norm Optional [ bool ] (Default value = False) False shallow Optional [ bool ] Whether to use a shallow network architecture. Defaults to False. False scale float The scale of the initialization. Defaults to 1e-3. 0.001 devices Union [ str , list ] The device(s) to use for computation. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. 
bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. **kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . 
before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () eval ( input_data = None ) # Reconstructs the input data using the mean of the encoded data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy () latent_gaussian_noisy ( input_data = None ) # Generates a noisy latent representation of the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and generate a noisy latent representation, by default None None Returns: Type Description Tensor torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. 
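Concretely, this is the standard VAE reparameterization step: the square linear heads z_mean and z_log_var set up in the constructor produce a mean mu and a log-variance log_v, and a scaled Gaussian sample is combined with them. A minimal standalone sketch, using plain PyTorch and hypothetical dimensions (the real model feeds in the output of its own projection pipeline):

```python
import torch

latent_dim = 8      # hypothetical latent size
scale = 1e-3        # matches the documented default initialization scale

# Square linear heads mapping encoded features to latent statistics,
# mirroring the z_mean / z_log_var modules created in the constructor.
z_mean = torch.nn.Linear(latent_dim, latent_dim)
z_log_var = torch.nn.Linear(latent_dim, latent_dim)

encoder_output = torch.randn(4, latent_dim)  # stand-in for the encoder/projection output

mu = z_mean(encoder_output)
log_v = z_log_var(encoder_output)

# Reparameterization: draw eps ~ N(0, I), scale it, then stretch by the predicted
# standard deviation exp(log_v / 2) and shift by the mean.
eps = scale * torch.randn_like(log_v)
noisy_latent = mu + torch.exp(log_v / 2.0) * eps
```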
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps project ( input_data = None ) # Projects the input data onto the autoencoder's latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to project onto the autoencoder's latent space, by default None None Returns: Type Description ndarray np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy () reconstruct ( input_data = None ) # Reconstructs the input data using the trained autoencoder. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. 
Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () reconstruction_eval ( input_data = None ) # Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed reconstruction_forward ( input_data = None ) # Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . 
projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed summary ( input_data = None , input_shape = None , verbose = True , display = True ) # Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] Input data to pass through the encoder, by default None None input_shape list The shape of the input data if input_data is None, by default None None verbose bool (Default value = True) True display bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Type Description Exception If self.input_dim is not a tuple or an integer. AssertionError If input_shape is None when input_data is None. Note The summary method calls the summary method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . 
input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . 
shapes_dict }) else : print ( self )","title":"Simulai models autoencoder"},{"location":"simulai_models/simulai_models_autoencoder/#autoencoder","text":"","title":"AutoEncoder"},{"location":"simulai_models/simulai_models_autoencoder/#autoencodermlp","text":"Bases: NetworkTemplate This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: Fully-connected encoder Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 class AutoencoderMLP ( NetworkTemplate ): r \"\"\"This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: - Fully-connected encoder - Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER \"\"\" def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . 
weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict () def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary () def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . numpy ()","title":"AutoencoderMLP"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.__init__","text":"Initialize the AutoencoderMLP network Parameters: Name Type Description Default encoder DenseNetwork The encoder network architecture. (Default value = None) None decoder DenseNetwork The decoder network architecture. (Default value = None) None input_dim Optional [ int ] The input dimensions of the data, by default None. None output_dim Optional [ int ] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default \"cpu\". 'cpu' name str The name of the network, by default None. 
None Source code in simulai/models/_pytorch_models/_autoencoder.py 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.eval_projection","text":"Evaluate the projection of the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 168 169 170 171 172 173 174 175 176 177 178 179 180 def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . 
numpy ()","title":"eval_projection()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.forward","text":"Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.projection","text":"Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent","title":"projection()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.reconstruction","text":"Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed","title":"reconstruction()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.summary","text":"Prints the summary of the network architecture Source code in simulai/models/_pytorch_models/_autoencoder.py 114 115 116 117 def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary ()","title":"summary()"},{"location":"simulai_models/simulai_models_autoencoder/#autoencodercnn","text":"Bases: NetworkTemplate This is an implementation of a convolutional autoencoder as Reduced Order Model. 
An autoencoder architecture consists of three stages: The convolutional encoder The bottleneck stage, subdivided in: Fully-connected encoder Fully connected decoder The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 class AutoencoderCNN ( NetworkTemplate ): r \"\"\"This is an implementation of a convolutional autoencoder as Reduced Order Model. An autoencoder architecture consists of three stages: - The convolutional encoder - The bottleneck stage, subdivided in: - Fully-connected encoder - Fully connected decoder - The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. 
latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . 
ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data ) def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. 
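Both summary and projection in this class flatten the convolutional feature map before the dense bottleneck and remember the pre-flatten shape so that reconstruction can restore it for the transposed-convolution decoder. A small sketch of that bookkeeping, with hypothetical shapes:

```python
import numpy as np
import torch

feature_map = torch.randn(10, 8, 4, 4)  # hypothetical encoder output: (batch, channels, H, W)

# Remember the per-sample shape, then flatten for the dense bottleneck.
before_flatten_dimension = tuple(feature_map.shape[1:])                   # (8, 4, 4)
flat = feature_map.reshape((-1, int(np.prod(before_flatten_dimension))))  # (10, 128)

# ... bottleneck_encoder / bottleneck_decoder operate on `flat` ...

# Undo the flattening before handing the tensor to the convolutional decoder.
restored = flat.reshape((-1, *before_flatten_dimension))                  # (10, 8, 4, 4)
```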
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"AutoencoderCNN"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.__init__","text":"Initialize the AutoencoderCNN network. Parameters: Name Type Description Default encoder ConvolutionalNetwork The encoder network architecture, by default None. None bottleneck_encoder Linear The bottleneck encoder network architecture, by default None. None bottleneck_decoder Linear The bottleneck decoder network architecture, by default None. None decoder ConvolutionalNetwork The decoder network architecture, by default None. None encoder_activation str The activation function used by the encoder network, by default 'relu'. 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimensions of the data, by default None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. None kernel_size Optional [ int ] (Default value = None) None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None channels Optional [ int ] The number of channels of the convolutional layers, by default None. None case Optional [ str ] The type of convolutional encoder and decoder to be used, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default 'cpu'. 'cpu' name str The name of the network, by default None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. 
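A construction sketch for AutoencoderCNN using the automatic-generation arguments from the parameter table above. Every concrete value here is illustrative, and the exact input shape convention (presence of a batch axis, channels-first layout) and the accepted values for case should be checked against the cnn_autoencoder_auto helper rather than taken from this sketch:

```python
import numpy as np
from simulai.models import AutoencoderCNN

# Hypothetical 2D snapshots: one channel on a 64 x 64 grid, 16 latent variables.
autoencoder = AutoencoderCNN(
    input_dim=(None, 1, 64, 64),   # assumed layout: (batch, channels, H, W)
    latent_dim=16,
    output_dim=(None, 1, 64, 64),
    kernel_size=3,
    channels=4,
    activation="relu",
    case="2d",                     # assumed identifier for the 2D convolutional variant
    devices="cpu",
)

snapshots = np.random.rand(10, 1, 64, 64).astype("float32")

latent = autoencoder.project(input_data=snapshots)     # latent coordinates as np.ndarray
restored = autoencoder.reconstruct(input_data=latent)  # back toward the original snapshot shape
```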
Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.eval","text":"Evaluate the autoencoder on the given dataset. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be evaluated, by default None. 
None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data )","title":"eval()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.forward","text":"Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.project","text":"Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 461 462 463 464 465 466 467 468 469 470 471 472 473 474 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy ()","title":"project()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.projection","text":"Project input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected. required Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . 
forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent","title":"projection()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.reconstruct","text":"Reconstructs the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be reconstructed. If not provided, uses the original input data, by default None. None Returns: Type Description ndarray np.ndarray: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 476 477 478 479 480 481 482 483 484 485 486 487 488 489 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"reconstruct()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.reconstruction","text":"Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed","title":"reconstruction()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.summary","text":"Prints the summary of the network architecture. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. (Default value = None) None input_shape list The shape of the input data. (Default value = None) None verbose bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . 
Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self )","title":"summary()"},{"location":"simulai_models/simulai_models_autoencoder/#autoencoderkoopman","text":"Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Koopman operator Fully connected decoder The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... 
[Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 class AutoencoderKoopman ( NetworkTemplate ): r \"\"\"This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Koopman operator - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. 
bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . 
add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : if verbose == True : if self . input_dim != None : input_shape = list ( self . input_dim ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () print ( f \"The Koopman Operator has shape: { self . K_op . shape } \" ) self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the bottleneck encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . 
prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the bottleneck decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m )) def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T ) def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . 
reconstruction ( input_data = latent_m ) return reconstructed_m def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy () def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"AutoencoderKoopman"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.__init__","text":"Constructs a new instance of the Autoencoder Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None input_dim Optional [ Tuple [ int , ...]] The input dimensions. Used for automatic network generation. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions. Used for automatic network generation. Defaults to None. None latent_dim Optional [ int ] The latent dimensions. Used for automatic network generation. Defaults to None. None activation Optional [ Union [ list , str ]] The activation functions for each layer. Used for automatic network generation. Defaults to None. None channels Optional [ int ] The number of channels. Used for automatic network generation. Defaults to None. None case Optional [ str ] The type of problem. Used for automatic network generation. Defaults to None. None architecture Optional [ str ] The network architecture. Used for automatic network generation. Defaults to None. None shallow Optional [ bool ] Whether to use shallow or deep network. 
Used for automatic network generation. Defaults to False. False use_batch_norm Optional [ bool ] (Default value = False) False encoder_activation str The activation function for the encoder. Defaults to \"relu\". 'relu' devices Union [ str , list ] The devices to use. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". 
name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.latent_forward","text":"Evaluates the operation u^{u+1} = K u^{i} Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description Tensor torch.Tensor: The computed latent representation. 
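A minimal usage sketch for the latent update described above. Everything here is an assumption for illustration: the constructor arguments accepted by the automatic-generation pipeline, the snapshot shape, and the dtype are not taken from the source and may need adjustment.

import numpy as np
import torch
from simulai.models import AutoencoderKoopman

# Hypothetical configuration: with all subnetworks left as None, the automatic
# pipeline (autoencoder_auto) is expected to build encoder/decoder from these
# arguments. The specific values below are placeholders, not documented defaults.
autoencoder = AutoencoderKoopman(
    input_dim=(None, 1, 64, 64),   # assumed snapshot shape (batch, channel, H, W)
    latent_dim=16,
    activation="tanh",
    channels=4,
    architecture="cnn",            # assumed keyword understood by autoencoder_auto
    case="2d",                     # assumed keyword understood by autoencoder_auto
)

u = np.random.rand(10, 1, 64, 64).astype("float32")   # assumed batch of snapshots

# project() returns the latent representation E(u) as a NumPy array;
# latent_forward() then applies the Koopman operator, u^{i+1} = K u^{i}.
latent = autoencoder.project(input_data=u)
latent_next = autoencoder.latent_forward(input_data=torch.from_numpy(latent))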
Source code in simulai/models/_pytorch_models/_autoencoder.py 820 821 822 823 824 825 826 827 828 829 830 831 832 def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T )","title":"latent_forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.latent_forward_m","text":"Evaluates the operation $u^{u+m} = K^m u^{i}$ Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The computed latent representation. Source code in simulai/models/_pytorch_models/_autoencoder.py 805 806 807 808 809 810 811 812 813 814 815 816 817 818 def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m ))","title":"latent_forward_m()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.predict","text":"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None n_steps int The number of extrapolations to perform. Defaults to 1. 1 Returns: Type Description ndarray np.ndarray: The predicted reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy ()","title":"predict()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.project","text":"Projects the input data into the latent space. 
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The projected data. Source code in simulai/models/_pytorch_models/_autoencoder.py 902 903 904 905 906 907 908 909 910 911 912 913 914 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy ()","title":"project()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.reconstruct","text":"Reconstructs the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"reconstruct()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.reconstruction_forward","text":"Evaluates the operation \u0168 = D(E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"reconstruction_forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.reconstruction_forward_m","text":"Evaluates the operation \u0168_m = D(K^m E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. 
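To make the extrapolation workflow above concrete, here is a hedged sketch assuming `autoencoder` is an already-trained AutoencoderKoopman instance on CPU and that the snapshot shape matches its encoder; both are assumptions, not values from the source.

import numpy as np

u0 = np.random.rand(1, 1, 64, 64).astype("float32")   # assumed single snapshot

# predict() projects u0, applies u^{i+1} = K u^{i} repeatedly for n_steps,
# and decodes every intermediate latent state back to the physical space.
extrapolated = autoencoder.predict(input_data=u0, n_steps=50)

# reconstruction_forward_m() evaluates U_til_m = D(K^m E(U)) in one call,
# which is the quantity an m-step Koopman loss would typically compare to data.
u_m = autoencoder.reconstruction_forward_m(input_data=u0, m=5)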
\"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . reconstruction ( input_data = latent_m ) return reconstructed_m","title":"reconstruction_forward_m()"},{"location":"simulai_models/simulai_models_autoencoder/#autoencodervariational","text":"Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a reduced order model. A variational autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Gaussian noise Fully connected decoder The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 
1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 class AutoencoderVariational ( NetworkTemplate ): r \"\"\"This is an implementation of a Koopman autoencoder as a reduced order model. A variational autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Gaussian noise - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. 
**kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. 
Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . 
Tensor : r \"\"\"Applies the encoder and bottleneck encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the bottleneck encoder applied to the input data. Note: This function is used for projection of the input data into the bottleneck space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection_with_bottleneck(input_data=input_data) \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the encoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection(input_data=input_data) \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the bottleneck decoder and decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the bottleneck decoder and decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the bottleneck decoder's output. Note: This function is used for reconstruction of the input data from the bottleneck space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> bottleneck_output = autoencoder._projection_with_bottleneck(input_data=input_data) >>> output_data = autoencoder._reconstruction_with_bottleneck(input_data=bottleneck_output) \"\"\" bottleneck_output = self . encoder_activation ( ( self . bottleneck_decoder . forward ( input_data = input_data )) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._reconstruction(input_data=input_data) \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . 
Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent ) def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . exp ( self . z_log_var ( latent ) / 2 ) def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . numpy () else : return covariance def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. 
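The sampling step described in the note above is the standard reparameterization trick, with the random draw shrunk by the constructor's scale argument. The standalone sketch below restates the same formula, z = mu + exp(log_var / 2) * (scale * eps), with plain torch operations; the function name and the shapes in the usage lines are illustrative only.

import torch

def reparameterize(mu: torch.Tensor, log_var: torch.Tensor, scale: float = 1e-3) -> torch.Tensor:
    # Draw eps ~ N(0, I), shrink it by `scale`, then shift/scale it with the
    # encoder statistics, matching latent_gaussian_noisy().
    eps = scale * torch.randn_like(log_var)
    return mu + torch.exp(log_var / 2.0) * eps

# Usage with assumed shapes: a batch of 8 latent vectors of dimension 16.
mu = torch.zeros(8, 16)
log_var = torch.zeros(8, 16)
z = reparameterize(mu, log_var)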
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. 
Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy ()","title":"AutoencoderVariational"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.CoVariance","text":"Computes the covariance matrix of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the covariance matrix, by default None None inv bool If True, returns the inverse of the covariance matrix, by default False False to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . 
numpy () else : return covariance","title":"CoVariance()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.Mu","text":"Computes the mean of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the mean, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent )","title":"Mu()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.Sigma","text":"Computes the standard deviation of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the standard deviation, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . 
exp ( self . z_log_var ( latent ) / 2 )","title":"Sigma()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.__init__","text":"Constructor method. Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None encoder_activation str The activation function to use in the encoder. Defaults to \"relu\". 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimension of the data. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimension of the data. Defaults to None. None latent_dim Optional [ int ] The size of the bottleneck layer. Defaults to None. None activation Optional [ Union [ list , str ]] The activation function to use in the networks. Defaults to None. None channels Optional [ int ] The number of channels in the input data. Defaults to None. None kernel_size Optional [ int ] Convolutional kernel size. (Default value = None) None case Optional [ str ] The name of the autoencoder variant. Defaults to None. None architecture Optional [ str ] The architecture of the networks. Defaults to None. None use_batch_norm Optional [ bool ] (Default value = False) False shallow Optional [ bool ] Whether to use a shallow network architecture. Defaults to False. False scale float The scale of the initialization. Defaults to 1e-3. 0.001 devices Union [ str , list ] The device(s) to use for computation. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... 
]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. **kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . 
to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.eval","text":"Reconstructs the input data using the mean of the encoded data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy ()","title":"eval()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.latent_gaussian_noisy","text":"Generates a noisy latent representation of the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and generate a noisy latent representation, by default None None Returns: Type Description Tensor torch.Tensor: A noisy latent representation of the input data. 
Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps","title":"latent_gaussian_noisy()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.project","text":"Projects the input data onto the autoencoder's latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to project onto the autoencoder's latent space, by default None None Returns: Type Description ndarray np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy ()","title":"project()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.reconstruct","text":"Reconstructs the input data using the trained autoencoder. 
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"reconstruct()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.reconstruction_eval","text":"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"reconstruction_eval()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.reconstruction_forward","text":"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. 
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed","title":"reconstruction_forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.summary","text":"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] Input data to pass through the encoder, by default None None input_shape list The shape of the input data if input_data is None, by default None None verbose bool (Default value = True) True display bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Type Description Exception If self.input_dim is not a tuple or an integer. AssertionError If input_shape is None when input_data is None. Note The summary method calls the summary method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . 
Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . 
shapes_dict }) else : print ( self )","title":"summary()"},{"location":"simulai_models/simulai_models_deeponet/","text":"red { color: red } DeepONets # DeepONet # Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_deeponet.py 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 class DeepONet ( NetworkTemplate ): name = \"deeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . 
__init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . _wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . 
input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ] def _latent_dimension_is_correct ( self , dim : Union [ int , tuple ]) -> bool : \"\"\"It checks if the latent dimension is consistent. Args: dim (Union[int, tuple]): Latent_space_dimension. Returns: bool: The confirmation about the dimensionality correctness. \"\"\" if type ( dim ) == int : return True elif type ( dim ) == tuple : if len ( tuple ) == 1 : return True else : return False def _bias_compatibility_is_correct ( self , dim_trunk : Union [ int , tuple ], dim_branch : Union [ int , tuple ] ) -> bool : assert dim_branch == dim_trunk + self . var_dim , ( \"When using bias, the dimension\" + \"of the branch output should be\" + \"trunk output + var_dim.\" ) def _forward_dense ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a matrix-like product, it means, the trunk network outputs serve as \"interpolation basis\" for the branch outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_branch . shape [ - 1 ] / self . var_dim ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , self . var_dim , latent_dim ) ) output = torch . matmul ( output_branch_reshaped , output_trunk [ ... , None ]) output = torch . squeeze ( output ) return output def _forward_pointwise ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product, after that a reshaping is applied in order to produce multiple outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_trunk . shape [ - 1 ] / self . var_dim ) output_trunk_reshaped = torch . reshape ( output_trunk , ( - 1 , latent_dim , self . var_dim ) ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , latent_dim , self . var_dim ) ) output = torch . sum ( output_trunk_reshaped * output_branch_reshaped , dim =- 2 , keepdim = False ) return output def _forward_vanilla ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = torch . 
sum ( output_trunk * output_branch , dim =- 1 , keepdim = True ) return output def _forward_selector_ ( self ) -> callable : \"\"\"It selects the forward method to be used. Returns: callable : The callable corresponding to the required forward method. \"\"\" if self . var_dim > 1 : # It operates as a typical dense layer if self . product_type == \"dense\" : return self . _forward_dense # It executes an inner product by parts between the outputs # of the subnetworks branch and trunk else : return self . _forward_pointwise else : return self . _forward_vanilla @property def _var_map ( self ) -> dict : # It checks all the data arrays in self.var_map have the same # batches dimension batches_dimensions = set ([ value . shape [ 0 ] for value in self . var_map . values ()]) assert ( len ( batches_dimensions ) == 1 ), \"This dataset is not proper to apply shuffling\" dim = list ( batches_dimensions )[ 0 ] indices = np . arange ( dim ) np . random . shuffle ( indices ) var_map_shuffled = { key : value [ indices ] for key , value in self . var_map . items ()} return var_map_shuffled @property def weights ( self ) -> list : return sum ([ net . weights for net in self . subnetworks ], []) # Now, a sequence of wrappers def _wrapper_bias_inactive ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return output def _wrapper_bias_active ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output_branch_ = output_branch [:, : - self . var_dim ] bias = output_branch [:, - self . var_dim :] output = ( self . _forward ( output_trunk = output_trunk , output_branch = output_branch_ ) + bias ) return output def _wrapper_decoder_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return self . decoder_network . forward ( input_data = input_data ) def _wrapper_decoder_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def _wrapper_rescale_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data * self . rescale_factors def _wrapper_rescale_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . 
decoder_wrapper ( input_data = output )) @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. \"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy () @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary () __init__ ( trunk_network = None , branch_network = None , decoder_network = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , model_id = None , use_bias = False ) # Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional). (Default value = None) None devices Union [ str , list ] Devices in which the model will be executed. (Default value = \"cpu\") 'cpu' product_type str Type of product to execute in the embedding space. (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. 
(Default value = None) None model_id str Name for the model (Default value = None) None use_bias bool (Default value = False) False Source code in simulai/models/_pytorch_models/_deeponet.py 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . __init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . 
trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . _wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ] eval ( trunk_data = None , branch_data = None ) # It uses the network to make evaluations. Parameters: Name Type Description Default trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. 
\"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy () eval_subnetwork ( name = None , input_data = None ) # It evaluates the output of DeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None input_data Union [ ndarray , Tensor ] The data used as input for the subnetwork. (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy () forward ( input_trunk = None , input_branch = None ) # Wrapper forward method. Parameters: Name Type Description Default input_trunk Union [ ndarray , Tensor ] (Default value = None) None input_branch Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description Tensor torch.Tensor: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . 
decoder_wrapper ( input_data = output )) ResDeepONet # Bases: DeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 class ResDeepONet ( DeepONet ): name = \"resdeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . _forward_cut_residual def _forward_default ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . 
Tensor : \"\"\"Forward method which considers the network a residual operation. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual def _forward_multiplied_by_trunk ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . Tensor : \"\"\"Forward method with multiplication by the trunk embedding. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual * input_trunk def _forward_cut_residual ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Forward method in which the residual operation is ignored. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return output __init__ ( trunk_network = None , branch_network = None , decoder_network = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , residual = True , multiply_by_trunk = False , model_id = None , use_bias = False ) # Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional) (Default value = None) None # Union [ str , list ] (Default value = \"cpu\") required product_type str Type of product to execute in the embedding space (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) None residual bool Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) True multiply_by_trunk bool Multiply the output by the trunk input or not. 
NOTE: if the option 'residual' False is activated it is performed after the multiplication: output*trunk_input + branch_input (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . 
_forward_cut_residual ImprovedDeepONet # Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 class ImprovedDeepONet ( ResDeepONet ): name = \"improveddeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . 
__init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved def _forward_improved ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Improved forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution v = self . encoder_trunk . forward ( input_data = input_trunk ) u = self . encoder_branch . forward ( input_data = input_branch ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk , u = u , v = v ), device = self . device , ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch , u = u , v = v ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . numpy () ) def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Encoder Trunk:\" ) self . encoder_trunk . summary () print ( \"Encoder Branch:\" ) self . encoder_branch . summary () print ( \"Branch Network:\" ) self . branch_network . summary () __init__ ( trunk_network = None , branch_network = None , decoder_network = None , encoder_trunk = None , encoder_branch = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , residual = False , multiply_by_trunk = False , model_id = None , use_bias = False ) # The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. 
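(Illustrative sketch, placed ahead of the parameter table below.) The constructor asserts that the output size of both encoders equals the hidden size of the trunk and branch ConvexDenseNetwork instances. The following minimal configuration is an assumption-based example only: the ConvexDenseNetwork and SLFNN constructor keys shown here are not documented on this page and follow SimulAI's usual conventions, while the ImprovedDeepONet arguments match the parameter table that follows.

```python
# Minimal, assumption-based sketch of an ImprovedDeepONet configuration.
# The encoders' output_size must equal the hidden_size of both the trunk and
# branch ConvexDenseNetwork subnetworks (checked by an assert in __init__).
from simulai.models import ImprovedDeepONet
from simulai.regression import ConvexDenseNetwork, SLFNN  # assumed import path

hidden = 50

trunk_net = ConvexDenseNetwork(
    layers_units=[hidden, hidden, hidden],  # all hidden layers share one size
    activations="tanh",
    input_size=2,            # e.g. space-time coordinates (x, t)
    output_size=hidden,
    name="trunk_net",
)
branch_net = ConvexDenseNetwork(
    layers_units=[hidden, hidden, hidden],
    activations="tanh",
    input_size=4,            # forcing/conditioning parameters
    output_size=hidden,
    name="branch_net",
)

# Shallow encoders mapping each input to the shared hidden dimension.
encoder_trunk = SLFNN(input_size=2, output_size=hidden, activation="tanh")
encoder_branch = SLFNN(input_size=4, output_size=hidden, activation="tanh")

improved_net = ImprovedDeepONet(
    trunk_network=trunk_net,
    branch_network=branch_net,
    encoder_trunk=encoder_trunk,
    encoder_branch=encoder_branch,
    var_dim=1,
    devices="cpu",
    model_id="improved_deeponet_example",
)

# Individual subnetworks can be inspected afterwards, e.g.:
# improved_net.eval_subnetwork(name="trunk", trunk_data=..., branch_data=...)
```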
Parameters: Name Type Description Default trunk_network ConvexDenseNetwork Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network ConvexDenseNetwork Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None encoder_trunk NetworkTemplate Shallow subnework used to map the trunk input to an auxiliary embedding None employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. 
(Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved eval_subnetwork ( name = None , trunk_data = None , branch_data = None ) # It evaluates the output of ImprovedDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . 
numpy () ) FlexibleDeepONet # Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 class FlexibleDeepONet ( ResDeepONet ): name = \"flexibledeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . 
__init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ] def _rescaling_operation ( self , input_data : torch . Tensor = None , rescaling_tensor : torch . Tensor = None ): angular = rescaling_tensor [:, : self . t_is ] linear = rescaling_tensor [:, self . t_is :] return angular * input_data + linear def _forward_flexible ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Flexible forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch ), device = self . device , ) rescaling = self . to_wrap ( entity = self . pre_network . forward ( input_data = input_branch ), device = self . device ) input_trunk_rescaled = self . _rescaling_operation ( input_data = input_trunk , rescaling_tensor = rescaling ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk_rescaled ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Pre Network:\" ) self . pre_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary () __init__ ( trunk_network = None , branch_network = None , decoder_network = None , pre_network = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , residual = False , multiply_by_trunk = False , model_id = None , use_bias = False ) # Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. 
It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None pre_network NetworkTemplate Subnework used to predict rescaling parameters for the trunk input None accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. 
(Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ] eval_subnetwork ( name = None , trunk_data = None , branch_data = None ) # It evaluates the output of FlexibleDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . 
numpy ()","title":"Simulai models deeponet"},{"location":"simulai_models/simulai_models_deeponet/#deeponets","text":"","title":"DeepONets"},{"location":"simulai_models/simulai_models_deeponet/#deeponet","text":"Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_deeponet.py 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 class DeepONet ( NetworkTemplate ): name = \"deeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . 
__init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . _wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . 
input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ] def _latent_dimension_is_correct ( self , dim : Union [ int , tuple ]) -> bool : \"\"\"It checks if the latent dimension is consistent. Args: dim (Union[int, tuple]): Latent_space_dimension. Returns: bool: The confirmation about the dimensionality correctness. \"\"\" if type ( dim ) == int : return True elif type ( dim ) == tuple : if len ( tuple ) == 1 : return True else : return False def _bias_compatibility_is_correct ( self , dim_trunk : Union [ int , tuple ], dim_branch : Union [ int , tuple ] ) -> bool : assert dim_branch == dim_trunk + self . var_dim , ( \"When using bias, the dimension\" + \"of the branch output should be\" + \"trunk output + var_dim.\" ) def _forward_dense ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a matrix-like product, it means, the trunk network outputs serve as \"interpolation basis\" for the branch outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_branch . shape [ - 1 ] / self . var_dim ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , self . var_dim , latent_dim ) ) output = torch . matmul ( output_branch_reshaped , output_trunk [ ... , None ]) output = torch . squeeze ( output ) return output def _forward_pointwise ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product, after that a reshaping is applied in order to produce multiple outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_trunk . shape [ - 1 ] / self . var_dim ) output_trunk_reshaped = torch . reshape ( output_trunk , ( - 1 , latent_dim , self . var_dim ) ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , latent_dim , self . var_dim ) ) output = torch . sum ( output_trunk_reshaped * output_branch_reshaped , dim =- 2 , keepdim = False ) return output def _forward_vanilla ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = torch . 
sum ( output_trunk * output_branch , dim =- 1 , keepdim = True ) return output def _forward_selector_ ( self ) -> callable : \"\"\"It selects the forward method to be used. Returns: callable : The callable corresponding to the required forward method. \"\"\" if self . var_dim > 1 : # It operates as a typical dense layer if self . product_type == \"dense\" : return self . _forward_dense # It executes an inner product by parts between the outputs # of the subnetworks branch and trunk else : return self . _forward_pointwise else : return self . _forward_vanilla @property def _var_map ( self ) -> dict : # It checks all the data arrays in self.var_map have the same # batches dimension batches_dimensions = set ([ value . shape [ 0 ] for value in self . var_map . values ()]) assert ( len ( batches_dimensions ) == 1 ), \"This dataset is not proper to apply shuffling\" dim = list ( batches_dimensions )[ 0 ] indices = np . arange ( dim ) np . random . shuffle ( indices ) var_map_shuffled = { key : value [ indices ] for key , value in self . var_map . items ()} return var_map_shuffled @property def weights ( self ) -> list : return sum ([ net . weights for net in self . subnetworks ], []) # Now, a sequence of wrappers def _wrapper_bias_inactive ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return output def _wrapper_bias_active ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output_branch_ = output_branch [:, : - self . var_dim ] bias = output_branch [:, - self . var_dim :] output = ( self . _forward ( output_trunk = output_trunk , output_branch = output_branch_ ) + bias ) return output def _wrapper_decoder_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return self . decoder_network . forward ( input_data = input_data ) def _wrapper_decoder_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def _wrapper_rescale_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data * self . rescale_factors def _wrapper_rescale_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . 
decoder_wrapper ( input_data = output )) @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. \"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy () @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary ()","title":"DeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.__init__","text":"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional). (Default value = None) None devices Union [ str , list ] Devices in which the model will be executed. (Default value = \"cpu\") 'cpu' product_type str Type of product to execute in the embedding space. (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) None model_id str Name for the model (Default value = None) None use_bias bool (Default value = False) False Source code in simulai/models/_pytorch_models/_deeponet.py 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . 
ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . __init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . 
_wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ]","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.eval","text":"It uses the network to make evaluations. Parameters: Name Type Description Default trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. \"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy ()","title":"eval()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.eval_subnetwork","text":"It evaluates the output of DeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None input_data Union [ ndarray , Tensor ] The data used as input for the subnetwork. (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. 
Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy ()","title":"eval_subnetwork()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.forward","text":"Wrapper forward method. Parameters: Name Type Description Default input_trunk Union [ ndarray , Tensor ] (Default value = None) None input_branch Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description Tensor torch.Tensor: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . decoder_wrapper ( input_data = output ))","title":"forward()"},{"location":"simulai_models/simulai_models_deeponet/#resdeeponet","text":"Bases: DeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 class ResDeepONet ( DeepONet ): name = \"resdeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . 
ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . _forward_cut_residual def _forward_default ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . Tensor : \"\"\"Forward method which considers the network a residual operation. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual def _forward_multiplied_by_trunk ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . Tensor : \"\"\"Forward method with multiplication by the trunk embedding. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual * input_trunk def _forward_cut_residual ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . 
Tensor ] = None , ) -> torch . Tensor : \"\"\"Forward method in which the residual operation is ignored. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return output","title":"ResDeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.ResDeepONet.__init__","text":"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional) (Default value = None) None # Union [ str , list ] (Default value = \"cpu\") required product_type str Type of product to execute in the embedding space (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) None residual bool Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) True multiply_by_trunk bool Multiply the output by the trunk input or not. NOTE: if the option 'residual' False is activated it is performed after the multiplication: output*trunk_input + branch_input (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. 
(Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . _forward_cut_residual","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#improveddeeponet","text":"Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 class ImprovedDeepONet ( ResDeepONet ): name = \"improveddeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. 
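A short sketch of the residual variant documented above. When residual=True, ResDeepONet requires branch_network.input_size to equal var_dim (it asserts this in __init__) and computes output = input_branch + D(trunk_input, input_branch). Here trunk_net and branch_net are hypothetical pre-built subnetworks and the shapes are illustrative.

import numpy as np
from simulai.models import ResDeepONet

model = ResDeepONet(
    trunk_network=trunk_net,
    branch_network=branch_net,
    var_dim=3,          # must equal branch_net.input_size when residual=True
    devices="cpu",
    residual=True,      # output = input_branch + D(trunk_input, input_branch)
)

trunk_input = np.random.rand(64, 2)   # assumed coordinate batch
branch_input = np.random.rand(64, 3)  # assumed forcing batch, width == var_dim
output = model.eval(trunk_data=trunk_input, branch_data=branch_input)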
(Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved def _forward_improved ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Improved forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution v = self . encoder_trunk . forward ( input_data = input_trunk ) u = self . encoder_branch . forward ( input_data = input_branch ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk , u = u , v = v ), device = self . device , ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch , u = u , v = v ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. 
(Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . numpy () ) def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Encoder Trunk:\" ) self . encoder_trunk . summary () print ( \"Encoder Branch:\" ) self . encoder_branch . summary () print ( \"Branch Network:\" ) self . branch_network . summary ()","title":"ImprovedDeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.ImprovedDeepONet.__init__","text":"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Parameters: Name Type Description Default trunk_network ConvexDenseNetwork Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network ConvexDenseNetwork Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None encoder_trunk NetworkTemplate Shallow subnework used to map the trunk input to an auxiliary embedding None employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . 
ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.ImprovedDeepONet.eval_subnetwork","text":"It evaluates the output of ImprovedDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. 
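A hedged sketch of the compatibility constraint checked by ImprovedDeepONet.__init__ and of its eval_subnetwork() call, which needs both inputs because the shallow encoders produce the u and v tensors consumed by the trunk and branch forward passes. The subnetworks and arrays below are hypothetical placeholders.

from simulai.models import ImprovedDeepONet

# The constructor asserts:
#   trunk_net.hidden_size == encoder_trunk.output_size
#     == branch_net.hidden_size == encoder_branch.output_size
model = ImprovedDeepONet(
    trunk_network=trunk_net,        # hypothetical ConvexDenseNetwork
    branch_network=branch_net,      # hypothetical ConvexDenseNetwork
    encoder_trunk=encoder_trunk,    # hypothetical shallow encoder
    encoder_branch=encoder_branch,  # hypothetical shallow encoder
    var_dim=1,
    devices="cpu",
)

# Both trunk_data and branch_data must be supplied, even to evaluate one subnetwork.
trunk_embedding = model.eval_subnetwork(
    name="trunk", trunk_data=coords, branch_data=forcing
)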
Source code in simulai/models/_pytorch_models/_deeponet.py 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . numpy () )","title":"eval_subnetwork()"},{"location":"simulai_models/simulai_models_deeponet/#flexibledeeponet","text":"Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 class FlexibleDeepONet ( ResDeepONet ): name = \"flexibledeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. 
(Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ] def _rescaling_operation ( self , input_data : torch . Tensor = None , rescaling_tensor : torch . Tensor = None ): angular = rescaling_tensor [:, : self . t_is ] linear = rescaling_tensor [:, self . t_is :] return angular * input_data + linear def _forward_flexible ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Flexible forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch ), device = self . device , ) rescaling = self . to_wrap ( entity = self . pre_network . forward ( input_data = input_branch ), device = self . device ) input_trunk_rescaled = self . _rescaling_operation ( input_data = input_trunk , rescaling_tensor = rescaling ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk_rescaled ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . 
subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Pre Network:\" ) self . pre_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary ()","title":"FlexibleDeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.FlexibleDeepONet.__init__","text":"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None pre_network NetworkTemplate Subnework used to predict rescaling parameters for the trunk input None accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). 
(Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ]","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.FlexibleDeepONet.eval_subnetwork","text":"It evaluates the output of FlexibleDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . 
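A self-contained NumPy illustration (not simulai code) of the rescaling that FlexibleDeepONet's pre-network drives, mirroring _rescaling_operation above: the pre-network emits 2 * t_is values per sample, split into "angular" and "linear" halves, and the trunk input of width t_is is rescaled as angular * x + linear. All values are random placeholders.

import numpy as np

t_is = 2                                   # trunk input width
trunk_input = np.random.rand(8, t_is)
rescaling = np.random.rand(8, 2 * t_is)    # stands in for the pre-network output

angular = rescaling[:, :t_is]
linear = rescaling[:, t_is:]
trunk_input_rescaled = angular * trunk_input + linear
print(trunk_input_rescaled.shape)          # (8, 2)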
numpy ()","title":"eval_subnetwork()"},{"location":"simulai_models/simulai_models_transformer/","text":"red { color: red } Transformer # Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_transformer.py 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 class Transformer ( NetworkTemplate ): def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. (Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. (Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . 
encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self ) __init__ ( num_heads_encoder = 1 , num_heads_decoder = 1 , embed_dim_encoder = Union [ int , Tuple ], embed_dim_decoder = Union [ int , Tuple ], encoder_activation = 'relu' , decoder_activation = 'relu' , encoder_mlp_layer_config = None , decoder_mlp_layer_config = None , number_of_encoders = 1 , number_of_decoders = 1 ) # A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Parameters: Name Type Description Default num_heads_encoder int The number of heads for the self-attention layer of the encoder. (Default value = 1) 1 num_heads_decoder int The number of heads for the self-attention layer of the decoder. (Default value = 1) 1 embed_dim_encoder int The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] embed_dim_decoder int The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] encoder_activation Union [ str , Module ] The activation to be used in all the encoder layers. (Default value = 'relu') 'relu' decoder_activation Union [ str , Module ] The activation to be used in all the decoder layers. (Default value = 'relu') 'relu' encoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None decoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None number_of_encoders int The number of encoders to be used. (Default value = 1) 1 number_of_decoders int The number of decoders to be used. 
(Default value = 1) 1 Source code in simulai/models/_pytorch_models/_transformer.py 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. (Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. (Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . 
encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d ) forward ( input_data = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input dataset. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The transformer output. Source code in simulai/models/_pytorch_models/_transformer.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output summary () # It prints a general view of the architecture. Source code in simulai/models/_pytorch_models/_transformer.py 296 297 298 299 def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self )","title":"Simulai models transformer"},{"location":"simulai_models/simulai_models_transformer/#transformer","text":"Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_transformer.py 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 class Transformer ( NetworkTemplate ): def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. 
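A minimal construction sketch for the Transformer class documented above. The MLP configuration dictionaries are unpacked directly into simulai's DenseNetwork, whose keyword arguments are not listed in this reference, so they appear here only as labeled placeholders; the input tensor is likewise hypothetical.

from simulai.models import Transformer

encoder_mlp_config = dict()  # placeholder: real DenseNetwork kwargs go here
decoder_mlp_config = dict()  # placeholder: real DenseNetwork kwargs go here

transformer = Transformer(
    num_heads_encoder=2,
    num_heads_decoder=2,
    embed_dim_encoder=32,
    embed_dim_decoder=32,
    encoder_activation="relu",
    decoder_activation="relu",
    encoder_mlp_layer_config=encoder_mlp_config,
    decoder_mlp_layer_config=decoder_mlp_config,
    number_of_encoders=3,
    number_of_decoders=3,
)

# forward() chains the encoders, then feeds each decoder the running output
# together with the encoder embedding; summary() prints the module tree.
output = transformer.forward(input_data=data)  # `data` is a placeholder array/tensor
transformer.summary()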
(Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. (Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . 
DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self )","title":"Transformer"},{"location":"simulai_models/simulai_models_transformer/#simulai.models.Transformer.__init__","text":"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Parameters: Name Type Description Default num_heads_encoder int The number of heads for the self-attention layer of the encoder. (Default value = 1) 1 num_heads_decoder int The number of heads for the self-attention layer of the decoder. (Default value = 1) 1 embed_dim_encoder int The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] embed_dim_decoder int The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] encoder_activation Union [ str , Module ] The activation to be used in all the encoder layers. (Default value = 'relu') 'relu' decoder_activation Union [ str , Module ] The activation to be used in all the decoder layers. (Default value = 'relu') 'relu' encoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None decoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None number_of_encoders int The number of encoders to be used. (Default value = 1) 1 number_of_decoders int The number of decoders to be used. (Default value = 1) 1 Source code in simulai/models/_pytorch_models/_transformer.py 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. (Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. 
(Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d )","title":"__init__()"},{"location":"simulai_models/simulai_models_transformer/#simulai.models.Transformer.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input dataset. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The transformer output. Source code in simulai/models/_pytorch_models/_transformer.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . 
DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output","title":"forward()"},{"location":"simulai_models/simulai_models_transformer/#simulai.models.Transformer.summary","text":"It prints a general view of the architecture. Source code in simulai/models/_pytorch_models/_transformer.py 296 297 298 299 def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self )","title":"summary()"},{"location":"simulai_models/simulai_models_unet/","text":"red { color: red } U-Net # Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_unet.py 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 class UNet ( NetworkTemplate ): def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. (Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . 
intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self ) __init__ ( layers_config = None , intermediary_outputs_indices = None , intermediary_inputs_indices = None , encoder_extra_args = dict (), decoder_extra_args = dict ()) # U-Net. Parameters: Name Type Description Default layers_config Dict A dictionary containing the complete configuration for the None intermediary_outputs_indices List [ int ] A list of indices for indicating the encoder outputs. (Default value = None) None intermediary_inputs_indices List [ int ] A list of indices for indicating the decoder inputs. (Default value = None) None encoder_extra_args Dict A dictionary containing extra arguments for the encoder. (Default value = dict()) dict () decoder_extra_args Dict A dictionary containing extra arguments for the decoder. (Default value = dict()) dict () Source code in simulai/models/_pytorch_models/_unet.py 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. (Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . 
get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) forward ( input_data = None ) # The U-Net forward method. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] A dataset to be inputted in the CNN U-Net encoder. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The U-Net output. Source code in simulai/models/_pytorch_models/_unet.py 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output summary () # It shows a general view of the architecture. Source code in simulai/models/_pytorch_models/_unet.py 280 281 282 283 def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self )","title":"Simulai models unet"},{"location":"simulai_models/simulai_models_unet/#u-net","text":"Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_unet.py 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 class UNet ( NetworkTemplate ): def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. 
(Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self )","title":"U-Net"},{"location":"simulai_models/simulai_models_unet/#simulai.models.UNet.__init__","text":"U-Net. Parameters: Name Type Description Default layers_config Dict A dictionary containing the complete configuration for the None intermediary_outputs_indices List [ int ] A list of indices for indicating the encoder outputs. (Default value = None) None intermediary_inputs_indices List [ int ] A list of indices for indicating the decoder inputs. (Default value = None) None encoder_extra_args Dict A dictionary containing extra arguments for the encoder. (Default value = dict()) dict () decoder_extra_args Dict A dictionary containing extra arguments for the decoder. 
(Default value = dict()) dict () Source code in simulai/models/_pytorch_models/_unet.py 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. (Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder )","title":"__init__()"},{"location":"simulai_models/simulai_models_unet/#simulai.models.UNet.forward","text":"The U-Net forward method. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] A dataset to be inputted in the CNN U-Net encoder. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The U-Net output. 
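As a complement, a sketch (assumptions flagged in the comments) of the overall shape of the layers_config dictionary read by UNet.__init__ above, together with the corresponding constructor call. The entries inside "architecture" stand for the CNN layer definitions consumed by CNNUnetEncoder/CNNUnetDecoder, whose exact format is not documented in this section, so the snippet is structural rather than runnable as-is:

from simulai.models import UNet

encoder_architecture = [...]  # placeholder: CNN layer definitions for the encoder
decoder_architecture = [...]  # placeholder: CNN layer definitions for the decoder

layers_config = {
    "encoder": {"type": "cnn", "architecture": encoder_architecture},  # "cnn" is the only type accepted above
    "decoder": {"type": "cnn", "architecture": decoder_architecture},
    "encoder_activations": ["relu"] * len(encoder_architecture),       # assumed one activation per layer
    "decoder_activations": ["relu"] * len(decoder_architecture),
}

unet = UNet(
    layers_config=layers_config,
    intermediary_outputs_indices=[1, 3],  # assumed encoder stages exposed as skip connections
    intermediary_inputs_indices=[3, 1],   # assumed decoder stages that receive them
)
# output = unet.forward(input_data=images)  # images: a torch.Tensor or np.ndarray batch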
Source code in simulai/models/_pytorch_models/_unet.py 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output","title":"forward()"},{"location":"simulai_models/simulai_models_unet/#simulai.models.UNet.summary","text":"It shows a general view of the architecture. Source code in simulai/models/_pytorch_models/_unet.py 280 281 282 283 def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self )","title":"summary()"},{"location":"simulai_optimization/simulai_optimizer/","text":"Optimizer # Optimizer # Source code in simulai/optimization/_optimization.py 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 
737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 class Optimizer : def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : \"\"\" Args: optimizer (str): A name for a PyTorch optimizer. early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . _get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . 
pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . accuracy_str = \"\" self . decay_frequency = None self . loss_states = None self . is_physics_informed = False def _verify_GPU_memory_availability ( self , device : str = None ): total = torch . cuda . get_device_properties ( device ) . total_memory reserved = torch . cuda . memory_reserved ( device ) allocated = torch . cuda . memory_allocated ( device ) return total - reserved - allocated def _try_to_transfer_to_GPU ( self , data : Union [ dict , torch . Tensor ], device : str = None ) -> None : available_GPU_memory = self . _verify_GPU_memory_availability ( device = device ) if isinstance ( data , dict ): data_size = sum ([ t . element_size () * t . nelement () for t in data . values ()]) if data_size < available_GPU_memory : data_ = { k : t . to ( device ) for k , t in data . items ()} print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data elif isinstance ( data , torch . Tensor ): data_size = data . element_size () * data . nelement () if data_size < available_GPU_memory : data_ = data . to ( device ) print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data else : return data def _seek_by_extra_trainable_parameters ( self , residual : SymbolicOperator = None ) -> Union [ list , None ]: if hasattr ( residual , \"constants\" ): extra_parameters = [ c for c in residual . trainable_parameters . values () if isinstance ( c , Parameter ) ] if extra_parameters : print ( \"There are extra trainable parameters.\" ) return extra_parameters else : return None def _get_lr_decay ( self ) -> Union [ callable , None ]: if self . lr_decay_scheduler_params is not None : name = self . lr_decay_scheduler_params . pop ( \"name\" ) self . decay_frequency = self . lr_decay_scheduler_params . pop ( \"decay_frequency\" ) lr_class = getattr ( torch . optim . lr_scheduler , name ) return lr_class else : return None def _exec_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . randperm ( size ) def _summary_writer ( self , loss_states : dict = None , epoch : int = None ) -> None : for k , v in loss_states . items (): loss = v [ epoch ] self . writer . add_scalar ( k , loss , epoch ) # It handles early-stopping for the optimization loop def _early_stopping_handler ( self , val_loss_function : callable = None ) -> None : loss = val_loss_function () self . 
accuracy_str = \"acc: {} \" . format ( loss ) if loss < self . validation_score : self . validation_score = loss self . awaited_steps = 0 return False elif ( loss > self . validation_score ) and ( self . awaited_steps <= self . early_stopping_params [ \"patience\" ] ): self . validation_score = loss self . awaited_steps += 1 return False else : print ( \"Early-stopping was actioned.\" ) return True def _lr_decay_handler ( self , epoch : int = None ): if ( epoch % self . decay_frequency == 0 ) and ( epoch > 0 ): self . lr_decay_scheduler . step () def _checkpoint_handler ( self , save_dir : str = None , name : str = None , model : NetworkTemplate = None , template : callable = None , compact : bool = False , epoch : int = None , ) -> None : if epoch % self . checkpoint_frequency == 0 : tag = self . overwrite_rule ( epoch ) saver = SPFile ( compact = compact ) saver . write ( save_dir = save_dir , name = name + tag , model = model , template = template ) def _no_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . arange ( size ) def _bypass_summary_writer ( self , ** kwargs ) -> None : pass # Doing nothing to early-stopping def _bypass_stop_handler ( self , ** kwargs ): return False # Doing nothing with lr def _bypass_lr_decay_handler ( self , ** kwargs ): pass # Doing nothing to checkpoint def _bypass_checkpoint_handler ( self , ** kwargs ): pass # When data is a NumPy array def _get_vector_data ( self , dataset : Union [ np . ndarray , torch . Tensor ] = None , indices : np . ndarray = None , ) -> torch . Tensor : if dataset is None : return None elif isinstance ( dataset , Dataset ): return dataset ()[ indices ] else : return dataset [ indices ] # When data is stored in a HDF5 dataset def _get_ondisk_data ( self , dataset : callable = None , indices : np . ndarray = None ) -> torch . Tensor : return dataset ( indices = indices ) # Preparing the batches (converting format and moving to the correct device) # in a single batch optimization loop def _make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : item . to ( device ) for key , item in input_data . items ()} else : input_data_dict = { self . input_data_name : input_data . to ( device )} return input_data_dict # Preparing the batches (converting format and moving to the correct device) def _batchwise_make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" , batch_indices : torch . Tensor = None , ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : self . get_data ( dataset = item , indices = batch_indices ) . to ( device ) for key , item in input_data . items () } else : input_data_dict = { self . input_data_name : self . get_data ( dataset = input_data , indices = batch_indices ) . to ( device ) } return input_data_dict # Getting up optimizer from the supported engines def _get_optimizer ( self , optimizer : str = None ) -> torch . nn . Module : try : for optim_module in self . optim_modules : mod_items = dir ( optim_module ) mod_items_lower = [ item . lower () for item in mod_items ] if optimizer in mod_items_lower : print ( f \"Optimizer { optimizer } found in { optim_module . __name__ } .\" ) optimizer_name = mod_items [ mod_items_lower . index ( optimizer )] return getattr ( optim_module , optimizer_name ) else : print ( f \"Optimizer { optimizer } not found in { optim_module . 
__name__ } .\" ) except : raise Exception ( f \"There is no correspondent to { optimizer } in any known optimization module.\" ) # Getting up loss function from the correspondent module def _get_loss ( self , loss : str = None ) -> callable : if type ( loss ) == str : name = loss . upper () return getattr ( self . losses_module , name + \"Loss\" ) elif callable ( loss ): return loss else : return f \"loss must be str or callable, but received { type ( loss ) } \" # Single batch optimization loop def _optimization_loop ( self , n_epochs : int = None , loss_function : callable = None , op : NetworkTemplate = None , loss_states : dict = None , validation_loss_function : callable = None , ) -> None : for epoch in range ( n_epochs ): self . optimizer_instance . zero_grad () self . optimizer_instance . step ( loss_function ) self . checkpoint_handler ( model = op , epoch = epoch , ** self . checkpoint_params ) self . summary_writer ( loss_states = loss_states , epoch = epoch ) self . lr_decay_handler ( epoch = epoch ) self . loss_states = loss_states # Basic version of the mini-batch optimization loop # TODO It could be parallelized def _batchwise_optimization_loop ( self , n_epochs : int = None , batch_size : int = None , loss : Union [ str , type ] = None , op : NetworkTemplate = None , input_data : torch . Tensor = None , target_data : torch . Tensor = None , validation_data : Tuple [ torch . Tensor ] = None , params : dict = None , device : str = \"cpu\" , ) -> None : print ( \"Executing batchwise optimization loop.\" ) if isinstance ( loss , str ): loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) else : assert isinstance ( loss , type ), \"The object provided is not a LossBasics object.\" loss_class = loss try : loss_instance = loss_class ( operator = op ) except : raise Exception ( f \"It was not possible to instantiate the class { loss } .\" ) if validation_data is not None : validation_input_data , validation_target_data = validation_data validation_input_data = self . _make_input_data ( validation_input_data , device = device ) validation_target_data = validation_target_data . to ( device ) val_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : val_loss_function = None batches = np . array_split ( np . arange ( self . n_samples ), int ( self . n_samples / batch_size ) ) # Number of batchwise optimization epochs n_batch_epochs = len ( batches ) epoch = 0 # Outer loop iteration b_epoch = 0 # Total iteration stop_criterion = False # When using mini-batches, it is necessary to # determine the number of iterations for the outer optimization # loop if n_batch_epochs > n_epochs : n_epochs_global = 1 else : n_epochs_global = int ( math . ceil ( n_epochs / n_batch_epochs )) while epoch < n_epochs_global and stop_criterion == False : # For each batch-wise realization it is possible to determine a # new permutation for the samples samples_permutation = self . sampler ( size = self . n_samples ) for ibatch in batches : self . optimizer_instance . zero_grad () indices = samples_permutation [ ibatch ] input_batch = self . _batchwise_make_input_data ( input_data , device = device , batch_indices = indices ) target_batch = self . get_data ( dataset = target_data , indices = indices ) if target_batch is not None : target_batch = target_batch . 
to ( device ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_batch , target_data = target_batch , call_back = self . accuracy_str , ** params , ) self . optimizer_instance . step ( loss_function ) self . summary_writer ( loss_states = loss_instance . loss_states , epoch = b_epoch ) self . checkpoint_handler ( model = op , epoch = b_epoch , ** self . checkpoint_params ) self . lr_decay_handler ( epoch = b_epoch ) stop_criterion = self . stop_handler ( val_loss_function = val_loss_function ) b_epoch += 1 epoch += 1 if hasattr ( loss_instance , \"loss_states\" ): if all ( [ isinstance ( item , list ) for item in loss_instance . loss_states . values ()] ): self . loss_states = { key : np . hstack ( value ) for key , value in loss_instance . loss_states . items () } else : self . loss_states = loss_instance . loss_states # Main fit method @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . 
_get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . 
size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . loss_states , validation_loss_function = validation_loss_function , ) __init__ ( optimizer = None , early_stopping = False , summary_writer = False , shuffle = True , lr_decay_scheduler_params = None , params = None , early_stopping_params = None , checkpoint_params = None ) # Parameters: Name Type Description Default optimizer str A name for a PyTorch optimizer. None early_stopping bool Early-stopping will be used or not. False summary_writer bool Write a Tensorboard run file or not. False shuffle bool Shuffle the dataset or not. True lr_decay_scheduler_params dict The parameters used for defining a learning rate decay scheme. None params dict Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). None early_stopping_params dict Parameters required by the early-stopping scheme. None checkpoint_params dict Parameters for configuring the checkpointing scheme. None Source code in simulai/optimization/_optimization.py 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : \"\"\" Args: optimizer (str): A name for a PyTorch optimizer. early_stopping (bool): Early-stopping will be used or not. 
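To make the constructor arguments above concrete, a hypothetical instantiation; every numeric value is an assumption, while the dictionary keys mirror what __init__ reads: "n_samples" is popped from params, "name" and "decay_frequency" from lr_decay_scheduler_params (the remaining keys are forwarded to the torch.optim.lr_scheduler class), "checkpoint_frequency" and "overwrite" from checkpoint_params, and "patience" from early_stopping_params. The import path simulai.optimization is inferred from the source file simulai/optimization/_optimization.py:

from simulai.optimization import Optimizer

optimizer = Optimizer(
    optimizer="adam",                       # matched case-insensitively against torch.optim (-> Adam)
    params={"lr": 1e-3},                    # forwarded to the chosen torch.optim class; may also carry "n_samples"
    shuffle=True,
    early_stopping=True,
    early_stopping_params={"patience": 10},
    lr_decay_scheduler_params={
        "name": "ExponentialLR",            # class name resolved in torch.optim.lr_scheduler
        "decay_frequency": 100,             # epochs between lr_decay_scheduler.step() calls
        "gamma": 0.9,                       # remaining keys go to the scheduler itself
    },
    checkpoint_params={
        "checkpoint_frequency": 500,        # required whenever checkpoint_params is given
        "overwrite": True,                  # keep a single checkpoint instead of one per save
        "save_dir": "/tmp/ckpt",            # assumed extra kwargs passed on to the checkpoint handler
        "name": "my_model",
        "template": None,                   # a callable used by SPFile to rebuild the model; placeholder here
    },
)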
summary_writer (bool): Write a Tensorboard run file or not. shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . _get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . accuracy_str = \"\" self . decay_frequency = None self . loss_states = None self . 
is_physics_informed = False fit ( op = None , input_data = None , target_data = None , validation_data = None , n_epochs = None , loss = 'rmse' , params = None , batch_size = None , device = 'cpu' , distributed = False , use_jit = False ) # Parameters: Name Type Description Default op NetworkTemplate The model which will be trained None input_data Union [ dict , Tensor , ndarray , callable ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray , callable ] The target data for the problem. None validation_data Tuple [ Union [ Tensor , ndarray , callable ]] The validation data used for the problem (if required). None n_epochs int Number of epochs for the optimization process. None loss str A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray 'rmse' params dict Extra parameters required for task-specific problems (as Physics-informed neural networks). None batch_size int The size of the batch used in each optimization epoch None device str The device in which the optimization will run, 'cpu' or 'gpu'. 'cpu' distributed bool Use distributed (multi-node) training or not. False use_jit bool Use PyTorch JIT (Just in time compilation) or not. False Source code in simulai/optimization/_optimization.py 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). 
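And a correspondingly hedged call to fit, using the signature documented above; `model`, the datasets and the numeric values are assumptions. The string loss="rmse" is resolved by _get_loss into the class RMSELoss exported by simulai.optimization (name.upper() + "Loss"):

optimizer.fit(
    op=model,                        # any NetworkTemplate-based model
    input_data=X_train,              # dict, torch.Tensor, np.ndarray or an on-disk callable
    target_data=Y_train,             # may be None for residual (physics-informed) training
    validation_data=(X_val, Y_val),  # required here because early_stopping=True
    n_epochs=2000,
    loss="rmse",
    params={},                       # extra loss kwargs; would carry "residual" for PINN training
    batch_size=64,                   # switches to the batch-wise optimization loop
    device="gpu",                    # falls back to CPU when no GPU is available
)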
batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . _get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . 
optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . 
loss_states , validation_loss_function = validation_loss_function , ) ScipyInterface # Source code in simulai/optimization/_optimization.py 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 class ScipyInterface : def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . 
optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64 def _stack_and_convert_parameters ( self , parameters : List [ Union [ torch . Tensor , np . ndarray ]] ) -> np . ndarray : \"\"\" It produces a stack of all the model parameters. Args: parameters (List[Union[torch.Tensor, np.ndarray]]): A list containing all the model parameters in their original shapes. Returns: np.ndarray: A stack (single vertical array) of all the model parameters. \"\"\" return np . hstack ( [ param . detach () . numpy () . astype ( np . float64 ) . flatten () for param in parameters . values () ] ) def _update_and_set_parameters ( self , parameters : np . ndarray ) -> None : \"\"\" It updates the parameters with the new values estimated by the optimizer. Args: parameters (np.ndarray): The stack of all the model parameters. \"\"\" operators = [ torch . from_numpy ( parameters [ slice ( * interval )] . reshape ( shape ) . astype ( self . default_dtype ) ) . to ( self . device ) for interval , shape in zip ( self . operators_intervals , self . operators_shapes . values () ) ] for opi , parameter in enumerate ( self . fun . parameters ()): parameter . data . copy_ ( operators [ opi ]) def _exec_kwargs_forward ( self , input_data : dict = None ): \"\"\"It executes the forward pass for the model when it receives more than one input. Args: input_data dict: Data to be passed to the model. \"\"\" return self . fun . forward ( ** input_data ) def _exec_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ): \"\"\"It executes the forward pass for the model. Args: input_data (Union[np.ndarray, torch.Tensor]): Data to be passed to the model. \"\"\" return self . fun . forward ( input_data = input_data ) def _fun_num ( self , parameters : np . ndarray ) -> Tuple [ float ]: \"\"\" Args: parameters (np.ndarray): The stacked parameters defined for the model. Returns: Tuple[float]: The loss(es) defined for the optimization process. \"\"\" self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () return loss . detach () . cpu () . numpy () . astype ( np . float64 ) def _fun ( self , parameters : np . ndarray ) -> Tuple [ float , np . ndarray ]: \"\"\" Args: parameters (np.ndarray): The stack of all the trainable parameters for the model. Returns: Tuple[float, np.ndarray]: A tuple containing the value for the loss function and the array of gradients for the model parameters. \"\"\" # Setting the new values for the model parameters self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () grads = [ v . grad . detach () . cpu () . numpy () for v in self . fun . parameters ()] gradients = np . hstack ( [ v . flatten () for v , shape in zip ( grads , list ( self . operators_shapes . values ())) ] ) return loss . detach () . cpu () . numpy () . astype ( np . float64 ), gradients . astype ( np . float64 ) def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . 
ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . x ) __init__ ( fun = None , optimizer = None , optimizer_config = dict (), loss = None , loss_config = None , device = 'cpu' , jac = None ) # An interface for using SciPy-defined optimization algorithms. Parameters: Name Type Description Default fun NetworkTemplate A model (neural network) to be trained. None optimizer str A name for an optimizar available on SciPy. None optimizer_config dict A configuration dictionary for the chosen optimizer. dict () loss callable A loss function implemented in the form of a Python function or class. None loss_config dict A configuration dictionary for the loss function. None device str The device in which the optimization will be executed ('cpu' or 'gpu'). 'cpu' jac str If necessary, define a method for evaluating the Jacobian available on SciPy. None Raises: Type Description Exception If a not recognized device is defined as 'device'. Source code in simulai/optimization/_optimization.py 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . 
environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64 fit ( input_data = None , target_data = None ) # Parameters: Name Type Description Default input_data Union [ dict , Tensor , ndarray ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray ] The target data used for training the model. None Source code in simulai/optimization/_optimization.py 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . 
x )","title":"Optimizer"},{"location":"simulai_optimization/simulai_optimizer/#optimizer","text":"","title":"Optimizer"},{"location":"simulai_optimization/simulai_optimizer/#optimizer_1","text":"Source code in simulai/optimization/_optimization.py 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 class Optimizer : def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : \"\"\" Args: optimizer (str): A name for a PyTorch 
optimizer. early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . _get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . accuracy_str = \"\" self . decay_frequency = None self . 
loss_states = None self . is_physics_informed = False def _verify_GPU_memory_availability ( self , device : str = None ): total = torch . cuda . get_device_properties ( device ) . total_memory reserved = torch . cuda . memory_reserved ( device ) allocated = torch . cuda . memory_allocated ( device ) return total - reserved - allocated def _try_to_transfer_to_GPU ( self , data : Union [ dict , torch . Tensor ], device : str = None ) -> None : available_GPU_memory = self . _verify_GPU_memory_availability ( device = device ) if isinstance ( data , dict ): data_size = sum ([ t . element_size () * t . nelement () for t in data . values ()]) if data_size < available_GPU_memory : data_ = { k : t . to ( device ) for k , t in data . items ()} print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data elif isinstance ( data , torch . Tensor ): data_size = data . element_size () * data . nelement () if data_size < available_GPU_memory : data_ = data . to ( device ) print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data else : return data def _seek_by_extra_trainable_parameters ( self , residual : SymbolicOperator = None ) -> Union [ list , None ]: if hasattr ( residual , \"constants\" ): extra_parameters = [ c for c in residual . trainable_parameters . values () if isinstance ( c , Parameter ) ] if extra_parameters : print ( \"There are extra trainable parameters.\" ) return extra_parameters else : return None def _get_lr_decay ( self ) -> Union [ callable , None ]: if self . lr_decay_scheduler_params is not None : name = self . lr_decay_scheduler_params . pop ( \"name\" ) self . decay_frequency = self . lr_decay_scheduler_params . pop ( \"decay_frequency\" ) lr_class = getattr ( torch . optim . lr_scheduler , name ) return lr_class else : return None def _exec_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . randperm ( size ) def _summary_writer ( self , loss_states : dict = None , epoch : int = None ) -> None : for k , v in loss_states . items (): loss = v [ epoch ] self . writer . add_scalar ( k , loss , epoch ) # It handles early-stopping for the optimization loop def _early_stopping_handler ( self , val_loss_function : callable = None ) -> None : loss = val_loss_function () self . accuracy_str = \"acc: {} \" . format ( loss ) if loss < self . validation_score : self . validation_score = loss self . awaited_steps = 0 return False elif ( loss > self . validation_score ) and ( self . awaited_steps <= self . early_stopping_params [ \"patience\" ] ): self . validation_score = loss self . awaited_steps += 1 return False else : print ( \"Early-stopping was actioned.\" ) return True def _lr_decay_handler ( self , epoch : int = None ): if ( epoch % self . decay_frequency == 0 ) and ( epoch > 0 ): self . lr_decay_scheduler . step () def _checkpoint_handler ( self , save_dir : str = None , name : str = None , model : NetworkTemplate = None , template : callable = None , compact : bool = False , epoch : int = None , ) -> None : if epoch % self . checkpoint_frequency == 0 : tag = self . overwrite_rule ( epoch ) saver = SPFile ( compact = compact ) saver . 
write ( save_dir = save_dir , name = name + tag , model = model , template = template ) def _no_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . arange ( size ) def _bypass_summary_writer ( self , ** kwargs ) -> None : pass # Doing nothing to early-stopping def _bypass_stop_handler ( self , ** kwargs ): return False # Doing nothing with lr def _bypass_lr_decay_handler ( self , ** kwargs ): pass # Doing nothing to checkpoint def _bypass_checkpoint_handler ( self , ** kwargs ): pass # When data is a NumPy array def _get_vector_data ( self , dataset : Union [ np . ndarray , torch . Tensor ] = None , indices : np . ndarray = None , ) -> torch . Tensor : if dataset is None : return None elif isinstance ( dataset , Dataset ): return dataset ()[ indices ] else : return dataset [ indices ] # When data is stored in a HDF5 dataset def _get_ondisk_data ( self , dataset : callable = None , indices : np . ndarray = None ) -> torch . Tensor : return dataset ( indices = indices ) # Preparing the batches (converting format and moving to the correct device) # in a single batch optimization loop def _make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : item . to ( device ) for key , item in input_data . items ()} else : input_data_dict = { self . input_data_name : input_data . to ( device )} return input_data_dict # Preparing the batches (converting format and moving to the correct device) def _batchwise_make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" , batch_indices : torch . Tensor = None , ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : self . get_data ( dataset = item , indices = batch_indices ) . to ( device ) for key , item in input_data . items () } else : input_data_dict = { self . input_data_name : self . get_data ( dataset = input_data , indices = batch_indices ) . to ( device ) } return input_data_dict # Getting up optimizer from the supported engines def _get_optimizer ( self , optimizer : str = None ) -> torch . nn . Module : try : for optim_module in self . optim_modules : mod_items = dir ( optim_module ) mod_items_lower = [ item . lower () for item in mod_items ] if optimizer in mod_items_lower : print ( f \"Optimizer { optimizer } found in { optim_module . __name__ } .\" ) optimizer_name = mod_items [ mod_items_lower . index ( optimizer )] return getattr ( optim_module , optimizer_name ) else : print ( f \"Optimizer { optimizer } not found in { optim_module . __name__ } .\" ) except : raise Exception ( f \"There is no correspondent to { optimizer } in any known optimization module.\" ) # Getting up loss function from the correspondent module def _get_loss ( self , loss : str = None ) -> callable : if type ( loss ) == str : name = loss . upper () return getattr ( self . losses_module , name + \"Loss\" ) elif callable ( loss ): return loss else : return f \"loss must be str or callable, but received { type ( loss ) } \" # Single batch optimization loop def _optimization_loop ( self , n_epochs : int = None , loss_function : callable = None , op : NetworkTemplate = None , loss_states : dict = None , validation_loss_function : callable = None , ) -> None : for epoch in range ( n_epochs ): self . optimizer_instance . zero_grad () self . optimizer_instance . step ( loss_function ) self . checkpoint_handler ( model = op , epoch = epoch , ** self . checkpoint_params ) self . 
summary_writer ( loss_states = loss_states , epoch = epoch ) self . lr_decay_handler ( epoch = epoch ) self . loss_states = loss_states # Basic version of the mini-batch optimization loop # TODO It could be parallelized def _batchwise_optimization_loop ( self , n_epochs : int = None , batch_size : int = None , loss : Union [ str , type ] = None , op : NetworkTemplate = None , input_data : torch . Tensor = None , target_data : torch . Tensor = None , validation_data : Tuple [ torch . Tensor ] = None , params : dict = None , device : str = \"cpu\" , ) -> None : print ( \"Executing batchwise optimization loop.\" ) if isinstance ( loss , str ): loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) else : assert isinstance ( loss , type ), \"The object provided is not a LossBasics object.\" loss_class = loss try : loss_instance = loss_class ( operator = op ) except : raise Exception ( f \"It was not possible to instantiate the class { loss } .\" ) if validation_data is not None : validation_input_data , validation_target_data = validation_data validation_input_data = self . _make_input_data ( validation_input_data , device = device ) validation_target_data = validation_target_data . to ( device ) val_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : val_loss_function = None batches = np . array_split ( np . arange ( self . n_samples ), int ( self . n_samples / batch_size ) ) # Number of batchwise optimization epochs n_batch_epochs = len ( batches ) epoch = 0 # Outer loop iteration b_epoch = 0 # Total iteration stop_criterion = False # When using mini-batches, it is necessary to # determine the number of iterations for the outer optimization # loop if n_batch_epochs > n_epochs : n_epochs_global = 1 else : n_epochs_global = int ( math . ceil ( n_epochs / n_batch_epochs )) while epoch < n_epochs_global and stop_criterion == False : # For each batch-wise realization it is possible to determine a # new permutation for the samples samples_permutation = self . sampler ( size = self . n_samples ) for ibatch in batches : self . optimizer_instance . zero_grad () indices = samples_permutation [ ibatch ] input_batch = self . _batchwise_make_input_data ( input_data , device = device , batch_indices = indices ) target_batch = self . get_data ( dataset = target_data , indices = indices ) if target_batch is not None : target_batch = target_batch . to ( device ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_batch , target_data = target_batch , call_back = self . accuracy_str , ** params , ) self . optimizer_instance . step ( loss_function ) self . summary_writer ( loss_states = loss_instance . loss_states , epoch = b_epoch ) self . checkpoint_handler ( model = op , epoch = b_epoch , ** self . checkpoint_params ) self . lr_decay_handler ( epoch = b_epoch ) stop_criterion = self . stop_handler ( val_loss_function = val_loss_function ) b_epoch += 1 epoch += 1 if hasattr ( loss_instance , \"loss_states\" ): if all ( [ isinstance ( item , list ) for item in loss_instance . loss_states . values ()] ): self . loss_states = { key : np . hstack ( value ) for key , value in loss_instance . loss_states . items () } else : self . loss_states = loss_instance . loss_states # Main fit method @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . 
Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . _get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . 
environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . 
early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . loss_states , validation_loss_function = validation_loss_function , )","title":"Optimizer"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.Optimizer.__init__","text":"Parameters: Name Type Description Default optimizer str A name for a PyTorch optimizer. None early_stopping bool Early-stopping will be used or not. False summary_writer bool Write a Tensorboard run file or not. False shuffle bool Shuffle the dataset or not. True lr_decay_scheduler_params dict The parameters used for defining a learning rate decay scheme. None params dict Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). None early_stopping_params dict Parameters required by the early-stopping scheme. None checkpoint_params dict Parameters for configuring the checkpointing scheme. None Source code in simulai/optimization/_optimization.py 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : \"\"\" Args: optimizer (str): A name for a PyTorch optimizer. early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . 
_get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . accuracy_str = \"\" self . decay_frequency = None self . loss_states = None self . is_physics_informed = False","title":"__init__()"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.Optimizer.fit","text":"Parameters: Name Type Description Default op NetworkTemplate The model which will be trained None input_data Union [ dict , Tensor , ndarray , callable ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray , callable ] The target data for the problem. None validation_data Tuple [ Union [ Tensor , ndarray , callable ]] The validation data used for the problem (if required). None n_epochs int Number of epochs for the optimization process. None loss str A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray 'rmse' params dict Extra parameters required for task-specific problems (as Physics-informed neural networks). None batch_size int The size of the batch used in each optimization epoch None device str The device in which the optimization will run, 'cpu' or 'gpu'. 'cpu' distributed bool Use distributed (multi-node) training or not. False use_jit bool Use PyTorch JIT (Just in time compilation) or not. 
False Source code in simulai/optimization/_optimization.py 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . 
_get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . 
size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . loss_states , validation_loss_function = validation_loss_function , )","title":"fit()"},{"location":"simulai_optimization/simulai_optimizer/#scipyinterface","text":"Source code in simulai/optimization/_optimization.py 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 class ScipyInterface : def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. 
device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64 def _stack_and_convert_parameters ( self , parameters : List [ Union [ torch . Tensor , np . ndarray ]] ) -> np . ndarray : \"\"\" It produces a stack of all the model parameters. Args: parameters (List[Union[torch.Tensor, np.ndarray]]): A list containing all the model parameters in their original shapes. Returns: np.ndarray: A stack (single vertical array) of all the model parameters. \"\"\" return np . hstack ( [ param . detach () . numpy () . astype ( np . float64 ) . flatten () for param in parameters . values () ] ) def _update_and_set_parameters ( self , parameters : np . ndarray ) -> None : \"\"\" It updates the parameters with the new values estimated by the optimizer. Args: parameters (np.ndarray): The stack of all the model parameters. \"\"\" operators = [ torch . from_numpy ( parameters [ slice ( * interval )] . reshape ( shape ) . astype ( self . default_dtype ) ) . to ( self . device ) for interval , shape in zip ( self . operators_intervals , self . operators_shapes . values () ) ] for opi , parameter in enumerate ( self . fun . parameters ()): parameter . data . copy_ ( operators [ opi ]) def _exec_kwargs_forward ( self , input_data : dict = None ): \"\"\"It executes the forward pass for the model when it receives more than one input. Args: input_data dict: Data to be passed to the model. \"\"\" return self . fun . forward ( ** input_data ) def _exec_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ): \"\"\"It executes the forward pass for the model. 
Args: input_data (Union[np.ndarray, torch.Tensor]): Data to be passed to the model. \"\"\" return self . fun . forward ( input_data = input_data ) def _fun_num ( self , parameters : np . ndarray ) -> Tuple [ float ]: \"\"\" Args: parameters (np.ndarray): The stacked parameters defined for the model. Returns: Tuple[float]: The loss(es) defined for the optimization process. \"\"\" self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () return loss . detach () . cpu () . numpy () . astype ( np . float64 ) def _fun ( self , parameters : np . ndarray ) -> Tuple [ float , np . ndarray ]: \"\"\" Args: parameters (np.ndarray): The stack of all the trainable parameters for the model. Returns: Tuple[float, np.ndarray]: A tuple containing the value for the loss function and the array of gradients for the model parameters. \"\"\" # Setting the new values for the model parameters self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () grads = [ v . grad . detach () . cpu () . numpy () for v in self . fun . parameters ()] gradients = np . hstack ( [ v . flatten () for v , shape in zip ( grads , list ( self . operators_shapes . values ())) ] ) return loss . detach () . cpu () . numpy () . astype ( np . float64 ), gradients . astype ( np . float64 ) def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . x )","title":"ScipyInterface"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.ScipyInterface.__init__","text":"An interface for using SciPy-defined optimization algorithms. Parameters: Name Type Description Default fun NetworkTemplate A model (neural network) to be trained. None optimizer str A name for an optimizar available on SciPy. None optimizer_config dict A configuration dictionary for the chosen optimizer. dict () loss callable A loss function implemented in the form of a Python function or class. None loss_config dict A configuration dictionary for the loss function. None device str The device in which the optimization will be executed ('cpu' or 'gpu'). 'cpu' jac str If necessary, define a method for evaluating the Jacobian available on SciPy. None Raises: Type Description Exception If a not recognized device is defined as 'device'. 
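For orientation, a minimal usage sketch may help here; it is not part of the generated reference above. It assumes `ScipyInterface` and `DenseNetwork` are importable from `simulai.optimization` and `simulai.regression` (as the reference paths suggest), and the `MSEClosure` helper is a hypothetical stand-in that only mirrors the calling convention the interface expects from its `loss` argument: `loss(input_data, target_data, **loss_config)` must return a closure that evaluates the loss and leaves gradients on the model parameters.

```python
# Illustrative sketch only -- not taken from the SimulAI documentation.
import torch

from simulai.optimization import ScipyInterface  # assumed import path
from simulai.regression import DenseNetwork      # assumed import path


class MSEClosure:
    """Hypothetical loss wrapper matching the convention ScipyInterface uses internally."""

    def __init__(self, model):
        self.model = model

    def __call__(self, input_data, target_data, **kwargs):
        def closure():
            self.model.zero_grad()
            output = self.model(input_data=input_data)
            loss = torch.mean((output - target_data) ** 2)
            loss.backward()  # ScipyInterface reads gradients from model.parameters()
            return loss

        return closure


x = torch.rand(128, 2)
y = torch.sin(x).sum(dim=1, keepdim=True)

net = DenseNetwork(layers_units=[16, 16], activations="tanh",
                   input_size=2, output_size=1, name="net")

interface = ScipyInterface(
    fun=net,
    optimizer="L-BFGS-B",                       # any SciPy minimize method name
    optimizer_config={"options": {"maxiter": 200}},
    loss=MSEClosure(net),
    loss_config={},
    device="cpu",
)

interface.fit(input_data=x, target_data=y)
```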
Source code in simulai/optimization/_optimization.py 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64","title":"__init__()"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.ScipyInterface.fit","text":"Parameters: Name Type Description Default input_data Union [ dict , Tensor , ndarray ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray ] The target data used for training the model. 
None Source code in simulai/optimization/_optimization.py 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . x )","title":"fit()"},{"location":"simulai_regression/simulai_dense/","text":"red { color: red } simulai.regression # Dense # Linear # Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 class Linear ( NetworkTemplate ): name = \"linear\" engine = \"torch\" def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data ) def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . name ) __init__ ( input_size = None , output_size = None , bias = True , name = None ) # Linear operator F(u) = Au + b Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. 
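A short usage sketch for the Linear operator, not part of the generated reference; it assumes the class is exposed as `simulai.regression.Linear`, as the reference path suggests.

```python
# Illustrative sketch only -- not taken from the SimulAI documentation.
import numpy as np

from simulai.regression import Linear  # assumed import path

# F(u) = Au + b with A of shape (2, 3) and b of shape (2,)
lin = Linear(input_size=3, output_size=2, bias=True, name="lin")

u = np.random.rand(5, 3).astype(np.float32)
v = lin.forward(input_data=u)   # NumPy input is converted; returns a torch.Tensor of shape (5, 2)

lin_np = lin.to_numpy()         # NumPy counterpart of the same operator
```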
(Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name forward ( input_data = None ) # Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using Linear. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 65 66 67 68 69 70 71 72 73 74 75 76 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data ) to_numpy () # It converts the tensors in Linear to numpy.ndarray. Source code in simulai/regression/_pytorch/_dense.py 78 79 80 81 def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . name ) SLFNN # Bases: Linear Source code in simulai/regression/_pytorch/_dense.py 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 class SLFNN ( Linear ): def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . _get_operation ( operation = activation ) def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. (Default value = None) \"\"\" return self . activation ( super () . 
forward ( input_data = input_data )) __init__ ( input_size = None , output_size = None , bias = True , name = None , activation = 'tanh' ) # Single layer fully-connected (dense) neural network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . _get_operation ( operation = activation ) forward ( input_data = None ) # Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using SLFNN. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 110 111 112 113 114 115 116 117 118 119 120 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. (Default value = None) \"\"\" return self . activation ( super () . forward ( input_data = input_data )) ShallowNetwork # Bases: SLFNN Source code in simulai/regression/_pytorch/_dense.py 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 class ShallowNetwork ( SLFNN ): def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. (Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . __init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size def forward ( self , input_data : Union [ torch . Tensor , np . 
ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . forward ( input_data = input_data )) return self . output_layer . forward ( input_data = hidden_state ) __init__ ( input_size = None , hidden_size = None , output_size = None , bias = True , name = None , activation = 'tanh' ) # ELM-like (Extreme Learning Machine) shallow network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None hidden_size int Dimension of the hidden (intermediary) state. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias or not for the last layer. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. (Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . __init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size forward ( input_data = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 155 156 157 158 159 160 161 162 163 164 165 166 167 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . forward ( input_data = input_data )) return self . output_layer . 
forward ( input_data = hidden_state ) DenseNetwork # Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 class DenseNetwork ( NetworkTemplate ): name = \"dense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . __init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . _setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . 
shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )] def _calculate_gain ( self , activation : str = \"Tanh\" ) -> float : \"\"\"It evaluates a multiplier coefficient, named as `gain`, which is used to enhance the funcionality of each kind of activation function. Args: activation (str, optional): (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name . lower () in self . gain_supported_activations : return torch . nn . init . calculate_gain ( name . lower ()) else : return 1 @staticmethod def _determine_initialization ( activation : str = \"Tanh\" ) -> str : \"\"\"It determines the most proper initialization method for each activation function. Args: activation (str, optional): Activation function. (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name in [ \"ReLU\" ]: return \"kaiming\" elif name == \"Siren\" : return \"siren\" else : return \"xavier\" def _setup_layer ( self , input_size : int = 0 , output_size : int = 0 , initialization : str = None , bias : bool = True , first_layer : bool = False , ) -> torch . nn . Linear : \"\"\" Args: input_size (int, optional): Dimension of the input. (Default value = 0) output_size (int, optional): Dimension of the output. (Default value = 0) initialization (str, optional): Initialization method. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) first_layer (bool, optional): Is this layer the first layer or not. (Default value = False) \"\"\" # It instantiates a linear operation # f: y^l = f(x^(l-1)) = (W^l).dot(x^(l-1)) + b^l layer = torch . nn . Linear ( input_size , output_size , bias = bias ) if initialization == \"xavier\" : torch . nn . init . xavier_normal_ ( layer . weight , gain = self . _calculate_gain ( self . activations_str [ 0 ]) ) return layer # The Siren initialization requires some special consideration elif initialization == \"siren\" : assert ( self . c is not None ), \"When using siren, the parameter c must be defined.\" assert ( self . omega_0 is not None ), \"When using siren, the parameter omega_0 must be defined.\" if first_layer == True : m = 1 / input_size else : m = np . sqrt ( self . c / input_size ) / self . omega_0 torch . nn . init . trunc_normal_ ( layer . weight , a =- m , b = m ) b = np . sqrt ( 1 / input_size ) torch . nn . init . trunc_normal_ ( layer . bias , a =- b , b = b ) return layer elif initialization == \"kaiming\" : return layer # Kaiming is the default initialization in PyTorch else : print ( \"Initialization method still not implemented. \\ Using Kaiming instead\" ) return layer # The forward step of the network @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. 
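As a point of reference while reading the constructor and forward pass above, a minimal instantiation sketch follows; it is not part of the generated documentation, and it assumes `DenseNetwork` is importable from `simulai.regression` as the reference path suggests. The layer sizes and activation are arbitrary choices for illustration.

```python
# Illustrative sketch only -- not taken from the SimulAI documentation.
import torch

from simulai.regression import DenseNetwork  # assumed import path

net = DenseNetwork(
    layers_units=[64, 64, 64],   # neurons per hidden layer
    activations="tanh",          # a single string applies the same activation everywhere
    input_size=3,
    output_size=1,
    name="surrogate",
)

x = torch.rand(32, 3)
y = net(input_data=x)            # forward pass, output shape (32, 1)
```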
(Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor __init__ ( layers_units = None , activations = None , input_size = None , output_size = None , normalization = 'bypass' , name = '' , last_bias = True , last_activation = 'identity' , ** kwargs ) # Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . __init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . 
items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . _setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )] forward ( input_data = None ) # It executes the forward step for the DenseNetwork. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input tensor to be processed by DenseNetwork. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. (Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor ResDenseNetwork # Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 class ResDenseNetwork ( DenseNetwork ): name = \"residualdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. 
(Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :]) # Merging the layers into a reasonable sequence def _merge ( self , layer : list = None , act : list = None ) -> list : \"\"\"It merges the dense layers and the activations into a single block. Args: layer (list, optional): List of dense layers. (Default value = None) act (list, optional): List of activation functions. (Default value = None) \"\"\" merged_list = list () for i , j in zip ( layer , act ): merged_list . append ( i ) merged_list . append ( j ) return merged_list def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor __init__ ( layers_units = None , activations = None , input_size = None , output_size = None , normalization = 'bypass' , name = '' , last_bias = True , last_activation = 'identity' , residual_size = 1 , ** kwargs ) # Residual Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. 
(Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' residual_size int Size of the residual block. (Default value = 1) 1 Source code in simulai/regression/_pytorch/_dense.py 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :]) forward ( input_data = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . 
ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor summary () # It prints a summary of the network. Source code in simulai/regression/_pytorch/_dense.py 476 477 478 479 480 481 482 483 484 485 def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block ) ConvexDenseNetwork # Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 class ConvexDenseNetwork ( DenseNetwork ): name = \"convexdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . _check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) def _check_regular_net ( self , layers_units : list ) -> bool : \"\"\"It checks if all the layers has the same number of neurons. Args: layers_units (list): \"\"\" mean = int ( sum ( layers_units ) / len ( layers_units )) self . hidden_size = mean if len ([ True for j in layers_units if j == mean ]) == len ( layers_units ): return True else : return False @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. 
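To make the convex-combination forward pass above concrete, a brief sketch follows; it is not part of the generated reference and assumes `ConvexDenseNetwork` is importable from `simulai.regression`. The auxiliary inputs `u` and `v` must have the same width as the (identical) hidden layers, since the hidden states are mixed as `(1 - z)*u + z*v`.

```python
# Illustrative sketch only -- not taken from the SimulAI documentation.
import torch

from simulai.regression import ConvexDenseNetwork  # assumed import path

hidden = 32
trunk = ConvexDenseNetwork(
    layers_units=[hidden, hidden, hidden],  # all hidden layers must be equal in width
    activations="tanh",
    input_size=2,
    output_size=1,
    name="trunk",
)

x = torch.rand(8, 2)
u = torch.rand(8, hidden)   # e.g. output of a first auxiliary encoder
v = torch.rand(8, hidden)   # e.g. output of a second auxiliary encoder

y = trunk(input_data=x, u=u, v=v)   # output shape (8, 1)
```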
(Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). (Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). (Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor __init__ ( layers_units = None , activations = None , input_size = None , output_size = None , normalization = 'bypass' , name = '' , last_bias = True , last_activation = 'identity' , ** kwargs ) # Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . 
_check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) forward ( input_data = None , u = None , v = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Input data to be processed using ConvexDenseNetwork. (Default value = None) None u Union [ Tensor , ndarray ] Input generated by the first auxiliar encoder (external model). (Default value = None) None v Union [ Tensor , ndarray ] Input generated by the second auxiliar encoder (external model). (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. (Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). (Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). (Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor","title":"Simulai dense"},{"location":"simulai_regression/simulai_dense/#simulairegression","text":"","title":"simulai.regression"},{"location":"simulai_regression/simulai_dense/#dense","text":"","title":"Dense"},{"location":"simulai_regression/simulai_dense/#linear","text":"Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 class Linear ( NetworkTemplate ): name = \"linear\" engine = \"torch\" def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . 
output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data ) def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . name )","title":"Linear"},{"location":"simulai_regression/simulai_dense/#simulai.regression.Linear.__init__","text":"Linear operator F(u) = Au + b Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.Linear.forward","text":"Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using Linear. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 65 66 67 68 69 70 71 72 73 74 75 76 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data )","title":"forward()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.Linear.to_numpy","text":"It converts the tensors in Linear to numpy.ndarray. Source code in simulai/regression/_pytorch/_dense.py 78 79 80 81 def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . 
name )","title":"to_numpy()"},{"location":"simulai_regression/simulai_dense/#slfnn","text":"Bases: Linear Source code in simulai/regression/_pytorch/_dense.py 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 class SLFNN ( Linear ): def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . _get_operation ( operation = activation ) def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. (Default value = None) \"\"\" return self . activation ( super () . forward ( input_data = input_data ))","title":"SLFNN"},{"location":"simulai_regression/simulai_dense/#simulai.regression.SLFNN.__init__","text":"Single layer fully-connected (dense) neural network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . _get_operation ( operation = activation )","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.SLFNN.forward","text":"Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using SLFNN. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 110 111 112 113 114 115 116 117 118 119 120 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. 
(Default value = None) \"\"\" return self . activation ( super () . forward ( input_data = input_data ))","title":"forward()"},{"location":"simulai_regression/simulai_dense/#shallownetwork","text":"Bases: SLFNN Source code in simulai/regression/_pytorch/_dense.py 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 class ShallowNetwork ( SLFNN ): def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. (Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . __init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . forward ( input_data = input_data )) return self . output_layer . forward ( input_data = hidden_state )","title":"ShallowNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ShallowNetwork.__init__","text":"ELM-like (Extreme Learning Machine) shallow network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None hidden_size int Dimension of the hidden (intermediary) state. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias or not for the last layer. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. (Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . 
__init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ShallowNetwork.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 155 156 157 158 159 160 161 162 163 164 165 166 167 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . forward ( input_data = input_data )) return self . output_layer . forward ( input_data = hidden_state )","title":"forward()"},{"location":"simulai_regression/simulai_dense/#densenetwork","text":"Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 class DenseNetwork ( NetworkTemplate ): name = \"dense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . __init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . 
gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . _setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )] def _calculate_gain ( self , activation : str = \"Tanh\" ) -> float : \"\"\"It evaluates a multiplier coefficient, named as `gain`, which is used to enhance the funcionality of each kind of activation function. Args: activation (str, optional): (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name . lower () in self . gain_supported_activations : return torch . nn . init . calculate_gain ( name . lower ()) else : return 1 @staticmethod def _determine_initialization ( activation : str = \"Tanh\" ) -> str : \"\"\"It determines the most proper initialization method for each activation function. Args: activation (str, optional): Activation function. (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name in [ \"ReLU\" ]: return \"kaiming\" elif name == \"Siren\" : return \"siren\" else : return \"xavier\" def _setup_layer ( self , input_size : int = 0 , output_size : int = 0 , initialization : str = None , bias : bool = True , first_layer : bool = False , ) -> torch . nn . Linear : \"\"\" Args: input_size (int, optional): Dimension of the input. (Default value = 0) output_size (int, optional): Dimension of the output. (Default value = 0) initialization (str, optional): Initialization method. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) first_layer (bool, optional): Is this layer the first layer or not. (Default value = False) \"\"\" # It instantiates a linear operation # f: y^l = f(x^(l-1)) = (W^l).dot(x^(l-1)) + b^l layer = torch . nn . Linear ( input_size , output_size , bias = bias ) if initialization == \"xavier\" : torch . nn . init . xavier_normal_ ( layer . weight , gain = self . _calculate_gain ( self . activations_str [ 0 ]) ) return layer # The Siren initialization requires some special consideration elif initialization == \"siren\" : assert ( self . 
c is not None ), \"When using siren, the parameter c must be defined.\" assert ( self . omega_0 is not None ), \"When using siren, the parameter omega_0 must be defined.\" if first_layer == True : m = 1 / input_size else : m = np . sqrt ( self . c / input_size ) / self . omega_0 torch . nn . init . trunc_normal_ ( layer . weight , a =- m , b = m ) b = np . sqrt ( 1 / input_size ) torch . nn . init . trunc_normal_ ( layer . bias , a =- b , b = b ) return layer elif initialization == \"kaiming\" : return layer # Kaiming is the default initialization in PyTorch else : print ( \"Initialization method still not implemented. \\ Using Kaiming instead\" ) return layer # The forward step of the network @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. (Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor","title":"DenseNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.DenseNetwork.__init__","text":"Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. 
(Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . __init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . _setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )]","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.DenseNetwork.forward","text":"It executes the forward step for the DenseNetwork. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input tensor to be processed by DenseNetwork. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. (Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . 
activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor","title":"forward()"},{"location":"simulai_regression/simulai_dense/#resdensenetwork","text":"Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 class ResDenseNetwork ( DenseNetwork ): name = \"residualdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :]) # Merging the layers into a reasonable sequence def _merge ( self , layer : list = None , act : list = None ) -> list : \"\"\"It merges the dense layers and the activations into a single block. Args: layer (list, optional): List of dense layers. 
(Default value = None) act (list, optional): List of activation functions. (Default value = None) \"\"\" merged_list = list () for i , j in zip ( layer , act ): merged_list . append ( i ) merged_list . append ( j ) return merged_list def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor","title":"ResDenseNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ResDenseNetwork.__init__","text":"Residual Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' residual_size int Size of the residual block. (Default value = 1) 1 Source code in simulai/regression/_pytorch/_dense.py 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . 
__init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :])","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ResDenseNetwork.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor","title":"forward()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ResDenseNetwork.summary","text":"It prints a summary of the network. Source code in simulai/regression/_pytorch/_dense.py 476 477 478 479 480 481 482 483 484 485 def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block )","title":"summary()"},{"location":"simulai_regression/simulai_dense/#convexdensenetwork","text":"Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 class ConvexDenseNetwork ( DenseNetwork ): name = \"convexdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. 
This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . _check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) def _check_regular_net ( self , layers_units : list ) -> bool : \"\"\"It checks if all the layers has the same number of neurons. Args: layers_units (list): \"\"\" mean = int ( sum ( layers_units ) / len ( layers_units )) self . hidden_size = mean if len ([ True for j in layers_units if j == mean ]) == len ( layers_units ): return True else : return False @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. (Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). (Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). (Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor","title":"ConvexDenseNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ConvexDenseNetwork.__init__","text":"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. 
(Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . _check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , )","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ConvexDenseNetwork.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Input data to be processed using ConvexDenseNetwork. (Default value = None) None u Union [ Tensor , ndarray ] Input generated by the first auxiliar encoder (external model). (Default value = None) None v Union [ Tensor , ndarray ] Input generated by the second auxiliar encoder (external model). (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. (Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). 
(Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). (Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor","title":"forward()"},{"location":"simulai_regression/simulai_opinf/","text":"red { color: red } OpInf # OpInf # Source code in simulai/regression/_opinf.py 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 
693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 class OpInf : def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . _parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . 
tmp_data_path = \"/tmp\" # Matrix containing all the model parameters @property def O_hat ( self ) -> np . ndarray : \"\"\"The concatenation of all the coefficients matrices\"\"\" valid = [ m for m in [ self . c_hat , self . A_hat , self . H_hat , self . B_hat ] if m is not None ] return np . hstack ( valid ) @property def D_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the data matrix\"\"\" return np . array ([ self . n_samples , self . n_linear_terms + self . n_quadratic_inputs ]) @property def Res_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the right-hand side residual matrix\"\"\" return np . array ([ self . n_samples , self . n_outputs ]) @property def m_indices ( self ) -> list : \"\"\"Indices for the non-repeated observables in the Kronecker product output \"\"\" return np . vstack ([ self . i_u , self . j_u ]) . T . tolist () @property def solver_nature ( self ) -> str : \"\"\"It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: str: the solver classification \"\"\" if self . solver == \"pinv\" : return \"lazy\" else : return \"memory\" # Splitting the global solution into corresponding operators def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . T # Setting up model parameters def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . items (): setattr ( self , key , value ) @property def check_fits_in_memory ( self ) -> str : \"\"\"It checks if the data matrices, D and Res_matrix, can fit on memory Returns: str: the method for dealing with the data matrix, 'batch- wise' or 'global' \"\"\" total_size = np . prod ( self . D_matrix_dim ) + np . prod ( self . Res_matrix_dim ) item_size = np . array ([ 0 ]) . astype ( \"float64\" ) . itemsize allocated_memory = total_size * item_size available_memory = psutil . virtual_memory () . available if allocated_memory >= available_memory : print ( \"The data matrices does not fit in memory. Using batchwise process.\" ) return \"batchwise\" else : print ( \"The data matrices fits in memory.\" ) return \"global\" # It checks if a matrix is symmetric def _is_symmetric ( self , matrix : np . ndarray = None ) -> bool : \"\"\"It checks if the system matrix is symmetric Args: matrix (np.ndarray): the global system matrix Returns: bool: Is the matrix symmetric ? True or False \"\"\" return np . array_equal ( matrix , matrix . T ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . 
ndarray : \"\"\"Kronecker product between two arrays Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Kronecker product augmented using extra variables (such as forcing terms) def _augmented_kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"Kronecker product between two arrays with self products for a and b Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" ab = np . concatenate ([ a , b ], axis =- 1 ) kron_ab = self . _kronecker_product ( a = ab , b = ab ) return kron_ab # Kronecker product for the variables themselves def _simple_kronecker_product ( self , a : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Kronecker product with a=b Args: a (np;ndarray): first element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" kron_aa = self . _kronecker_product ( a = a , b = a ) return kron_aa # Serially constructing operators def _serial_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a serial way Args: input_chunks (List[np.ndarray]): list of input data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" for ii , ( i_chunk , t_chunk , f_chunk ) in enumerate ( zip ( input_chunks , target_chunks , forcing_chunks ) ): sys . stdout . write ( \" \\r Processing chunk {} of {} \" . format ( ii + 1 , len ( input_chunks )) ) sys . stdout . flush () D_o_ii , R_matrix_ii = self . _construct_operators ( input_data = i_chunk , target_data = t_chunk , forcing_data = f_chunk ) D_o += D_o_ii R_matrix += R_matrix_ii return D_o , R_matrix # Parallely constructing operators def _parallel_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . 
ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a parallel way Args: input_chunks (List[np.ndarray]): list of input data chunks forcing_chunks (List[np.ndarray]): list of forcing data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" # All the datasets list must have the same length in order to allow the compatibility and the partitions # between workers. assert len ( input_chunks ) == len ( target_chunks ) == len ( forcing_chunks ), ( \"All the list must have the same\" \"length, but received \" f \" { len ( input_chunks ) } , \" f \" { len ( target_chunks ) } and\" f \" { len ( forcing_chunks ) } \" ) keys = list () comm = MPI . COMM_WORLD rank = comm . Get_rank () n_chunks = len ( input_chunks ) if rank == 0 : for batch_id in range ( n_chunks ): print ( \"Preparing the batch {} \" . format ( batch_id )) keys . append ( f \"batch_ { batch_id } \" ) input_chunks = comm . bcast ( input_chunks , root = 0 ) target_chunks = comm . bcast ( target_chunks , root = 0 ) forcing_chunks = comm . bcast ( forcing_chunks , root = 0 ) keys = comm . bcast ( keys , root = 0 ) comm . barrier () kwargs = { \"input_chunks\" : input_chunks , \"target_chunks\" : target_chunks , \"forcing_chunks\" : forcing_chunks , \"key\" : keys , } # Pipeline for executing MPI jobs for independent sub-processes mpi_run = PipelineMPI ( exec = self . _parallel_exec_wrapper , collect = True , show_log = self . show_log ) # Fitting the model instances in parallel mpi_run . run ( kwargs = kwargs ) # When MPI finishes a run it outputs a dictionary containing status_dict the # partial result of each worker if mpi_run . success : out = mpi_run . status_dict values = out . values () # Each field in the output dictionary contains a tuple (D_0, R_matrix) # with the partial values of the OpInf system matrices D_o = sum ([ v [ 0 ] for v in values ]) R_matrix = sum ([ v [ 1 ] for v in values ]) self . success = True else : self . continuing = 0 return D_o , R_matrix # Wrapper for the independent parallel process def _parallel_exec_wrapper ( self , input_chunks : np . ndarray = None , target_chunks : np . ndarray = None , forcing_chunks : list = None , key : str = None , ) -> dict : D_o_ii , R_matrix_ii = self . _construct_operators ( input_data = input_chunks , target_data = target_chunks , forcing_data = forcing_chunks , ) return { key : [ D_o_ii , R_matrix_ii ]} def _generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case # The field variables quadratic terms are used anyway. n_samples = input_data . shape [ 0 ] quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) # Matrix used for including constant terms in the operator expression unitary_matrix = self . bias_rescale * np . ones (( n_samples , 1 )) # Known data matrix (D) if forcing_data is not None : # Constructing D using purely linear forcing terms D = np . 
hstack ( [ unitary_matrix , input_data , forcing_data , quadratic_input_data ] ) else : D = np . hstack ([ unitary_matrix , input_data , quadratic_input_data ]) # Target data Res_matrix = target_data . T return D , Res_matrix # Creating datasets on disk with lazy access def _lazy_generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , save_path : str = None , batch_size : int = None , ) -> ( h5py . Dataset , h5py . Dataset , List [ slice ]): def batch_forcing ( batch : np . ndarray = None ) -> np . ndarray : return forcing_data [ batch ] def pass_forcing ( * args ) -> np . ndarray : return None if forcing_data is None : handle_forcing = pass_forcing else : handle_forcing = batch_forcing if save_path is None : save_path = self . tmp_data_path filename = os . path . join ( save_path , \"data_matrices.hdf5\" ) f = h5py . File ( filename , mode = \"w\" ) Ddset = f . create_dataset ( \"D\" , shape = tuple ( self . D_matrix_dim ), dtype = \"f\" ) Rdset = f . create_dataset ( \"Res_matrix\" , shape = tuple ( self . Res_matrix_dim ), dtype = \"f\" ) max_batches = int ( self . n_samples / batch_size ) batches = [ slice ( item [ 0 ], item [ - 1 ]) for item in np . array_split ( np . arange ( 0 , self . n_samples , 1 ), max_batches ) ] for batch in batches : # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data [ batch ], target_data = target_data [ batch ], forcing_data = handle_forcing ( batch ), ) Ddset [ batch ] = D Rdset [ batch ] = Res_matrix . T return Ddset , Rdset , batches , filename # Direct construction def _construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data ) # Constructing the data-driven component of the left operator D_o = D . T @ D # Constructing the right residual matrix R_matrix = D . T @ Res_matrix . T return D_o , R_matrix # Operators can be constructed incrementally when the dimensions are too large to # fit in common RAM. It also can be parallelized without major issues def _incremental_construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , ) -> ( np . ndarray , np . ndarray ): D_o = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_linear_terms + self . n_quadratic_inputs , ) ) R_matrix = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_outputs ) ) n_samples = input_data . shape [ 0 ] n_chunks = int ( n_samples / batch_size ) input_chunks = np . array_split ( input_data , n_chunks , axis = 0 ) target_chunks = np . array_split ( target_data , n_chunks , axis = 0 ) if forcing_data is not None : forcing_chunks = np . array_split ( forcing_data , n_chunks , axis = 0 ) else : forcing_chunks = n_chunks * [ None ] # The incremental dispatcher can be serial or parallel. D_o , R_matrix = self . dispatcher ( input_chunks = input_chunks , target_chunks = target_chunks , forcing_chunks = forcing_chunks , D_o = D_o , R_matrix = R_matrix , ) return D_o , R_matrix def _builtin_jacobian ( self , x ): return self . A_hat + ( self . K_op @ x . T ) def _external_jacobian ( self , x ): return self . 
jacobian_op ( x ) def _get_H_hat_column_position ( self , i : int , j : int ) -> Union [ int , None ]: jj = j - i return int (( i / 2 ) * ( 2 * self . n_inputs + 1 - i ) + jj ) def _define_H_hat_coefficient_function ( self , k : int , l : int , n : int , m : int ): if m is not None : H_coeff = self . H_hat [ k , m ] else : H_coeff = 0 if n == l : H_term = 2 * H_coeff else : H_term = H_coeff self . K_op [ k , l , n ] = H_term # Constructing a tensor for evaluating Jacobians def construct_K_op ( self , op : callable = None ) -> None : # Vector versions of the index functions get_H_hat_column_position = np . vectorize ( self . _get_H_hat_column_position ) define_H_hat_coefficient_function = np . vectorize ( self . _define_H_hat_coefficient_function ) if hasattr ( self , \"n_outputs\" ) is False : self . n_outputs = self . n_inputs if op is None : self . K_op = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) K = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) for k in range ( self . n_outputs ): K [ k , ... ] = k K = K . astype ( int ) ll = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) nn = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) L , N = np . meshgrid ( ll , nn , indexing = \"ij\" ) M_ = get_H_hat_column_position ( L , N ) M_u = np . triu ( M_ ) M = ( M_u + M_u . T - M_u . diagonal () * np . eye ( self . n_inputs )) . astype ( int ) define_H_hat_coefficient_function ( K , L , N , M ) self . jacobian = self . _builtin_jacobian else : self . jacobian_op = op self . jacobian = self . _external_jacobian # Constructing the basic setup def construct ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ) -> None : # Collecting information dimensional information from the datasets if ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == True ): assert len ( input_data . shape ) == len ( target_data . shape ) == 2 , ( \"The input and target data, \" \"must be two-dimensional but received shapes\" f \" { input_data . shape } and { target_data . shape } \" ) self . n_samples = input_data . shape [ 0 ] # When there are forcing variables there are extra operators in the model if self . forcing is not None : assert ( forcing_data is not None ), \"If the forcing terms are used, forcing data must be provided.\" assert len ( forcing_data . shape ) == 2 , ( \"The forcing data must be two-dimensional,\" f \" but received shape { forcing_data . shape } \" ) assert ( input_data . shape [ 0 ] == target_data . shape [ 0 ] == forcing_data . shape [ 0 ] ), ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } , { target_data . shape [ 0 ] } and { forcing_data . shape [ 0 ] } .\" ) self . n_forcing_inputs = forcing_data . shape [ 1 ] # For no forcing cases, the classical form is adopted else : print ( \"Forcing terms are not being used.\" ) assert input_data . shape [ 0 ] == target_data . shape [ 0 ], ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } and { target_data . shape [ 0 ] } \" ) # Number of inputs or degrees of freedom self . n_inputs = input_data . shape [ 1 ] self . n_outputs = target_data . shape [ 1 ] # When no dataset is provided to fit, it is necessary directly setting up the dimension values elif ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == False ): assert self . n_inputs != None and self . 
n_outputs != None , ( \"It is necessary to provide some\" \" value to n_inputs and n_outputs\" ) else : raise Exception ( \"There is no way for executing the system construction\" \" if no dataset or dimension is provided.\" ) # Defining parameters for the Kronecker product if ( self . forcing is None ) or ( self . forcing == \"linear\" ): # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] # When the forcing interaction is 'nonlinear', there operator H_hat is extended elif self . forcing == \"nonlinear\" : # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs + self . n_forcing_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] else : print ( f \"The option { self . forcing } is not allowed for the forcing kind.\" ) # Number of linear terms if forcing_data is not None : self . n_forcing_inputs = forcing_data . shape [ 1 ] self . n_linear_terms = 1 + self . n_inputs + self . n_forcing_inputs else : self . n_linear_terms = 1 + self . n_inputs self . raw_model = False # Evaluating the model operators def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . lambda_linear Lambda [ self . n_linear_terms :] = self . 
lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" ) # Making residual evaluations using the trained operator without forcing terms def _eval ( self , input_data : np . ndarray = None ) -> np . ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += self . c_hat . T return output # Making residual evaluations using the trained operator with forcing terms def _eval_forcing ( self , input_data : np . ndarray = None , forcing_data : np . ndarray = None ) -> np . ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += forcing_data @ self . B_hat . T output += self . c_hat . T return output def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . 
ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . eval_op ( input_data = input_data , ** kwargs ) # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) # Saving to disk a lean version of the model def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) D_matrix_dim : np . ndarray property # The dimension of the data matrix O_hat : np . ndarray property # The concatenation of all the coefficients matrices Res_matrix_dim : np . ndarray property # The dimension of the right-hand side residual matrix check_fits_in_memory : str property # It checks if the data matrices, D and Res_matrix, can fit on memory Returns: Name Type Description str str the method for dealing with the data matrix, 'batch- str wise' or 'global' m_indices : list property # Indices for the non-repeated observables in the Kronecker product output solver_nature : str property # It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: Name Type Description str str the solver classification __init__ ( forcing = None , bias_rescale = 1 , solver = 'lstsq' , parallel = None , show_log = False , engine = 'numpy' ) # Operator Inference (OpInf) Parameters: Name Type Description Default forcing str the kind of forcing to be used, 'linear' or 'nonlinear' None bias_rescale float factor for rescaling the linear coefficients (c_hat) 1 solver Union [ str , callable ] solver to be used for solving the global system, e. g. 'lstsq'. 
'lstsq' parallel str the kind of parallelism to be used (currently, 'mpi' or None) None engine str the engine to be used for constructing the global system (currently just 'numpy') 'numpy' Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . _parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . tmp_data_path = \"/tmp\" eval ( input_data = None , ** kwargs ) # Evaluating using the trained model Parameters: Name Type Description Default input_data ndarray array containing the input data None Returns: Type Description ndarray np.ndarray: output evaluation using the trained model Source code in simulai/regression/_opinf.py 952 953 954 955 956 957 958 959 960 961 962 def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . 
eval_op ( input_data = input_data , ** kwargs ) fit ( input_data = None , target_data = None , forcing_data = None , batch_size = None , Lambda = None , continuing = True , fit_partial = False , force_lazy_access = False , k_svd = None , save_path = None ) # Solving an Operator Inference system from large dataset Parameters: Name Type Description Default input_data ndarray dataset for the input data None target_data ndarray dataset for the target data None forcing_data ndarray dataset for the forcing data None batch_size int size of the batch used for creating the global system matrices None Lambda ndarray customized regularization matrix None Source code in simulai/regression/_opinf.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . 
lambda_linear Lambda [ self . n_linear_terms :] = self . lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" ) lean_save ( save_path = None , model_name = None ) # Lean saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . 
args ) save ( save_path = None , model_name = None ) # Complete saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) set ( ** kwargs ) # Setting up extra parameters (as regularization terms) Parameters: Name Type Description Default **kwargs dict dictionary containing extra parameters {} Returns: Type Description nothing Source code in simulai/regression/_opinf.py 206 207 208 209 210 211 212 213 214 215 216 217 def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . items (): setattr ( self , key , value ) set_operators ( global_matrix = None ) # Setting up each operator using the global system solution Parameters: Name Type Description Default global_matrix ndarray the solution of the global system None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . 
T","title":"Simulai opinf"},{"location":"simulai_regression/simulai_opinf/#opinf","text":"","title":"OpInf"},{"location":"simulai_regression/simulai_opinf/#opinf_1","text":"Source code in simulai/regression/_opinf.py 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 
892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 class OpInf : def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . _parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . tmp_data_path = \"/tmp\" # Matrix containing all the model parameters @property def O_hat ( self ) -> np . ndarray : \"\"\"The concatenation of all the coefficients matrices\"\"\" valid = [ m for m in [ self . c_hat , self . A_hat , self . H_hat , self . B_hat ] if m is not None ] return np . hstack ( valid ) @property def D_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the data matrix\"\"\" return np . array ([ self . n_samples , self . n_linear_terms + self . n_quadratic_inputs ]) @property def Res_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the right-hand side residual matrix\"\"\" return np . array ([ self . n_samples , self . 
n_outputs ]) @property def m_indices ( self ) -> list : \"\"\"Indices for the non-repeated observables in the Kronecker product output \"\"\" return np . vstack ([ self . i_u , self . j_u ]) . T . tolist () @property def solver_nature ( self ) -> str : \"\"\"It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: str: the solver classification \"\"\" if self . solver == \"pinv\" : return \"lazy\" else : return \"memory\" # Splitting the global solution into corresponding operators def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . T # Setting up model parameters def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . items (): setattr ( self , key , value ) @property def check_fits_in_memory ( self ) -> str : \"\"\"It checks if the data matrices, D and Res_matrix, can fit on memory Returns: str: the method for dealing with the data matrix, 'batch- wise' or 'global' \"\"\" total_size = np . prod ( self . D_matrix_dim ) + np . prod ( self . Res_matrix_dim ) item_size = np . array ([ 0 ]) . astype ( \"float64\" ) . itemsize allocated_memory = total_size * item_size available_memory = psutil . virtual_memory () . available if allocated_memory >= available_memory : print ( \"The data matrices does not fit in memory. Using batchwise process.\" ) return \"batchwise\" else : print ( \"The data matrices fits in memory.\" ) return \"global\" # It checks if a matrix is symmetric def _is_symmetric ( self , matrix : np . ndarray = None ) -> bool : \"\"\"It checks if the system matrix is symmetric Args: matrix (np.ndarray): the global system matrix Returns: bool: Is the matrix symmetric ? True or False \"\"\" return np . array_equal ( matrix , matrix . T ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"Kronecker product between two arrays Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . 
shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Kronecker product augmented using extra variables (such as forcing terms) def _augmented_kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"Kronecker product between two arrays with self products for a and b Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" ab = np . concatenate ([ a , b ], axis =- 1 ) kron_ab = self . _kronecker_product ( a = ab , b = ab ) return kron_ab # Kronecker product for the variables themselves def _simple_kronecker_product ( self , a : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Kronecker product with a=b Args: a (np;ndarray): first element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" kron_aa = self . _kronecker_product ( a = a , b = a ) return kron_aa # Serially constructing operators def _serial_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a serial way Args: input_chunks (List[np.ndarray]): list of input data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" for ii , ( i_chunk , t_chunk , f_chunk ) in enumerate ( zip ( input_chunks , target_chunks , forcing_chunks ) ): sys . stdout . write ( \" \\r Processing chunk {} of {} \" . format ( ii + 1 , len ( input_chunks )) ) sys . stdout . flush () D_o_ii , R_matrix_ii = self . _construct_operators ( input_data = i_chunk , target_data = t_chunk , forcing_data = f_chunk ) D_o += D_o_ii R_matrix += R_matrix_ii return D_o , R_matrix # Parallely constructing operators def _parallel_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a parallel way Args: input_chunks (List[np.ndarray]): list of input data chunks forcing_chunks (List[np.ndarray]): list of forcing data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" # All the datasets list must have the same length in order to allow the compatibility and the partitions # between workers. assert len ( input_chunks ) == len ( target_chunks ) == len ( forcing_chunks ), ( \"All the list must have the same\" \"length, but received \" f \" { len ( input_chunks ) } , \" f \" { len ( target_chunks ) } and\" f \" { len ( forcing_chunks ) } \" ) keys = list () comm = MPI . COMM_WORLD rank = comm . 
Get_rank () n_chunks = len ( input_chunks ) if rank == 0 : for batch_id in range ( n_chunks ): print ( \"Preparing the batch {} \" . format ( batch_id )) keys . append ( f \"batch_ { batch_id } \" ) input_chunks = comm . bcast ( input_chunks , root = 0 ) target_chunks = comm . bcast ( target_chunks , root = 0 ) forcing_chunks = comm . bcast ( forcing_chunks , root = 0 ) keys = comm . bcast ( keys , root = 0 ) comm . barrier () kwargs = { \"input_chunks\" : input_chunks , \"target_chunks\" : target_chunks , \"forcing_chunks\" : forcing_chunks , \"key\" : keys , } # Pipeline for executing MPI jobs for independent sub-processes mpi_run = PipelineMPI ( exec = self . _parallel_exec_wrapper , collect = True , show_log = self . show_log ) # Fitting the model instances in parallel mpi_run . run ( kwargs = kwargs ) # When MPI finishes a run it outputs a dictionary containing status_dict the # partial result of each worker if mpi_run . success : out = mpi_run . status_dict values = out . values () # Each field in the output dictionary contains a tuple (D_0, R_matrix) # with the partial values of the OpInf system matrices D_o = sum ([ v [ 0 ] for v in values ]) R_matrix = sum ([ v [ 1 ] for v in values ]) self . success = True else : self . continuing = 0 return D_o , R_matrix # Wrapper for the independent parallel process def _parallel_exec_wrapper ( self , input_chunks : np . ndarray = None , target_chunks : np . ndarray = None , forcing_chunks : list = None , key : str = None , ) -> dict : D_o_ii , R_matrix_ii = self . _construct_operators ( input_data = input_chunks , target_data = target_chunks , forcing_data = forcing_chunks , ) return { key : [ D_o_ii , R_matrix_ii ]} def _generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case # The field variables quadratic terms are used anyway. n_samples = input_data . shape [ 0 ] quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) # Matrix used for including constant terms in the operator expression unitary_matrix = self . bias_rescale * np . ones (( n_samples , 1 )) # Known data matrix (D) if forcing_data is not None : # Constructing D using purely linear forcing terms D = np . hstack ( [ unitary_matrix , input_data , forcing_data , quadratic_input_data ] ) else : D = np . hstack ([ unitary_matrix , input_data , quadratic_input_data ]) # Target data Res_matrix = target_data . T return D , Res_matrix # Creating datasets on disk with lazy access def _lazy_generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , save_path : str = None , batch_size : int = None , ) -> ( h5py . Dataset , h5py . Dataset , List [ slice ]): def batch_forcing ( batch : np . ndarray = None ) -> np . ndarray : return forcing_data [ batch ] def pass_forcing ( * args ) -> np . ndarray : return None if forcing_data is None : handle_forcing = pass_forcing else : handle_forcing = batch_forcing if save_path is None : save_path = self . tmp_data_path filename = os . path . join ( save_path , \"data_matrices.hdf5\" ) f = h5py . File ( filename , mode = \"w\" ) Ddset = f . create_dataset ( \"D\" , shape = tuple ( self . D_matrix_dim ), dtype = \"f\" ) Rdset = f . 
create_dataset ( \"Res_matrix\" , shape = tuple ( self . Res_matrix_dim ), dtype = \"f\" ) max_batches = int ( self . n_samples / batch_size ) batches = [ slice ( item [ 0 ], item [ - 1 ]) for item in np . array_split ( np . arange ( 0 , self . n_samples , 1 ), max_batches ) ] for batch in batches : # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data [ batch ], target_data = target_data [ batch ], forcing_data = handle_forcing ( batch ), ) Ddset [ batch ] = D Rdset [ batch ] = Res_matrix . T return Ddset , Rdset , batches , filename # Direct construction def _construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data ) # Constructing the data-driven component of the left operator D_o = D . T @ D # Constructing the right residual matrix R_matrix = D . T @ Res_matrix . T return D_o , R_matrix # Operators can be constructed incrementally when the dimensions are too large to # fit in common RAM. It also can be parallelized without major issues def _incremental_construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , ) -> ( np . ndarray , np . ndarray ): D_o = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_linear_terms + self . n_quadratic_inputs , ) ) R_matrix = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_outputs ) ) n_samples = input_data . shape [ 0 ] n_chunks = int ( n_samples / batch_size ) input_chunks = np . array_split ( input_data , n_chunks , axis = 0 ) target_chunks = np . array_split ( target_data , n_chunks , axis = 0 ) if forcing_data is not None : forcing_chunks = np . array_split ( forcing_data , n_chunks , axis = 0 ) else : forcing_chunks = n_chunks * [ None ] # The incremental dispatcher can be serial or parallel. D_o , R_matrix = self . dispatcher ( input_chunks = input_chunks , target_chunks = target_chunks , forcing_chunks = forcing_chunks , D_o = D_o , R_matrix = R_matrix , ) return D_o , R_matrix def _builtin_jacobian ( self , x ): return self . A_hat + ( self . K_op @ x . T ) def _external_jacobian ( self , x ): return self . jacobian_op ( x ) def _get_H_hat_column_position ( self , i : int , j : int ) -> Union [ int , None ]: jj = j - i return int (( i / 2 ) * ( 2 * self . n_inputs + 1 - i ) + jj ) def _define_H_hat_coefficient_function ( self , k : int , l : int , n : int , m : int ): if m is not None : H_coeff = self . H_hat [ k , m ] else : H_coeff = 0 if n == l : H_term = 2 * H_coeff else : H_term = H_coeff self . K_op [ k , l , n ] = H_term # Constructing a tensor for evaluating Jacobians def construct_K_op ( self , op : callable = None ) -> None : # Vector versions of the index functions get_H_hat_column_position = np . vectorize ( self . _get_H_hat_column_position ) define_H_hat_coefficient_function = np . vectorize ( self . _define_H_hat_coefficient_function ) if hasattr ( self , \"n_outputs\" ) is False : self . n_outputs = self . n_inputs if op is None : self . K_op = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) K = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) for k in range ( self . 
n_outputs ): K [ k , ... ] = k K = K . astype ( int ) ll = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) nn = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) L , N = np . meshgrid ( ll , nn , indexing = \"ij\" ) M_ = get_H_hat_column_position ( L , N ) M_u = np . triu ( M_ ) M = ( M_u + M_u . T - M_u . diagonal () * np . eye ( self . n_inputs )) . astype ( int ) define_H_hat_coefficient_function ( K , L , N , M ) self . jacobian = self . _builtin_jacobian else : self . jacobian_op = op self . jacobian = self . _external_jacobian # Constructing the basic setup def construct ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ) -> None : # Collecting information dimensional information from the datasets if ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == True ): assert len ( input_data . shape ) == len ( target_data . shape ) == 2 , ( \"The input and target data, \" \"must be two-dimensional but received shapes\" f \" { input_data . shape } and { target_data . shape } \" ) self . n_samples = input_data . shape [ 0 ] # When there are forcing variables there are extra operators in the model if self . forcing is not None : assert ( forcing_data is not None ), \"If the forcing terms are used, forcing data must be provided.\" assert len ( forcing_data . shape ) == 2 , ( \"The forcing data must be two-dimensional,\" f \" but received shape { forcing_data . shape } \" ) assert ( input_data . shape [ 0 ] == target_data . shape [ 0 ] == forcing_data . shape [ 0 ] ), ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } , { target_data . shape [ 0 ] } and { forcing_data . shape [ 0 ] } .\" ) self . n_forcing_inputs = forcing_data . shape [ 1 ] # For no forcing cases, the classical form is adopted else : print ( \"Forcing terms are not being used.\" ) assert input_data . shape [ 0 ] == target_data . shape [ 0 ], ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } and { target_data . shape [ 0 ] } \" ) # Number of inputs or degrees of freedom self . n_inputs = input_data . shape [ 1 ] self . n_outputs = target_data . shape [ 1 ] # When no dataset is provided to fit, it is necessary directly setting up the dimension values elif ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == False ): assert self . n_inputs != None and self . n_outputs != None , ( \"It is necessary to provide some\" \" value to n_inputs and n_outputs\" ) else : raise Exception ( \"There is no way for executing the system construction\" \" if no dataset or dimension is provided.\" ) # Defining parameters for the Kronecker product if ( self . forcing is None ) or ( self . forcing == \"linear\" ): # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] # When the forcing interaction is 'nonlinear', there operator H_hat is extended elif self . forcing == \"nonlinear\" : # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs + self . n_forcing_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] else : print ( f \"The option { self . forcing } is not allowed for the forcing kind.\" ) # Number of linear terms if forcing_data is not None : self . n_forcing_inputs = forcing_data . 
shape [ 1 ] self . n_linear_terms = 1 + self . n_inputs + self . n_forcing_inputs else : self . n_linear_terms = 1 + self . n_inputs self . raw_model = False # Evaluating the model operators def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . lambda_linear Lambda [ self . n_linear_terms :] = self . lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . 
_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" ) # Making residual evaluations using the trained operator without forcing terms def _eval ( self , input_data : np . ndarray = None ) -> np . ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += self . c_hat . T return output # Making residual evaluations using the trained operator with forcing terms def _eval_forcing ( self , input_data : np . ndarray = None , forcing_data : np . ndarray = None ) -> np . ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += forcing_data @ self . B_hat . T output += self . c_hat . T return output def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . eval_op ( input_data = input_data , ** kwargs ) # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) # Saving to disk a lean version of the model def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . 
join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"OpInf"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.D_matrix_dim","text":"The dimension of the data matrix","title":"D_matrix_dim"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.O_hat","text":"The concatenation of all the coefficients matrices","title":"O_hat"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.Res_matrix_dim","text":"The dimension of the right-hand side residual matrix","title":"Res_matrix_dim"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.check_fits_in_memory","text":"It checks if the data matrices, D and Res_matrix, can fit on memory Returns: Name Type Description str str the method for dealing with the data matrix, 'batch- str wise' or 'global'","title":"check_fits_in_memory"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.m_indices","text":"Indices for the non-repeated observables in the Kronecker product output","title":"m_indices"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.solver_nature","text":"It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: Name Type Description str str the solver classification","title":"solver_nature"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.__init__","text":"Operator Inference (OpInf) Parameters: Name Type Description Default forcing str the kind of forcing to be used, 'linear' or 'nonlinear' None bias_rescale float factor for rescaling the linear coefficients (c_hat) 1 solver Union [ str , callable ] solver to be used for solving the global system, e. g. 'lstsq'. 'lstsq' parallel str the kind of parallelism to be used (currently, 'mpi' or None) None engine str the engine to be used for constructing the global system (currently just 'numpy') 'numpy' Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . 
eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . _parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . tmp_data_path = \"/tmp\"","title":"__init__()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.eval","text":"Evaluating using the trained model Parameters: Name Type Description Default input_data ndarray array containing the input data None Returns: Type Description ndarray np.ndarray: output evaluation using the trained model Source code in simulai/regression/_opinf.py 952 953 954 955 956 957 958 959 960 961 962 def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . eval_op ( input_data = input_data , ** kwargs )","title":"eval()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.fit","text":"Solving an Operator Inference system from large dataset Parameters: Name Type Description Default input_data ndarray dataset for the input data None target_data ndarray dataset for the target data None forcing_data ndarray dataset for the forcing data None batch_size int size of the batch used for creating the global system matrices None Lambda ndarray customized regularization matrix None Source code in simulai/regression/_opinf.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . 
ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . lambda_linear Lambda [ self . n_linear_terms :] = self . lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . 
T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" )","title":"fit()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.lean_save","text":"Lean saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"lean_save()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.save","text":"Complete saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"save()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.set","text":"Setting up extra parameters (as regularization terms) Parameters: Name Type Description Default **kwargs dict dictionary containing extra parameters {} Returns: Type Description nothing Source code in simulai/regression/_opinf.py 206 207 208 209 210 211 212 213 214 215 216 217 def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . 
items (): setattr ( self , key , value )","title":"set()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.set_operators","text":"Setting up each operator using the global system solution Parameters: Name Type Description Default global_matrix ndarray the solution of the global system None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . T","title":"set_operators()"},{"location":"simulai_rom/simulai_rom/","text":"red { color: red } simulai.rom # POD # Bases: ROM It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that Source code in simulai/rom/_rom.py 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class POD ( ROM ): \"\"\"It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that \"\"\" name = \"pod\" def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . 
svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . 
data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) ) __init__ ( config = None , svd_filter = None ) # Propor Orthogonal Decomposition Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . svd_filter = svd_filter fit ( data = None ) # Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . 
format ( relative_modal_energy )) project ( data = None ) # Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) reconstruct ( projected_data = None ) # Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) restore ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 323 324 325 326 327 328 329 330 331 332 333 334 335 336 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . 
join ( save_path , model_name + \".npz\" ) ) save ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 309 310 311 312 313 314 315 316 317 318 319 320 321 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) IPOD # Bases: ROM Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. Source code in simulai/rom/_rom.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 class IPOD ( ROM ): \"\"\"Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. \"\"\" name = \"ipod\" def __init__ ( self , config : dict = None , data_mean : np . ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . 
data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . 
data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) ) __init__ ( config = None , data_mean = None , svd_filter = None ) # Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None data_mean ndarray pre-evaluated mean of the dataset (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 def __init__ ( self , config : dict = None , data_mean : np . ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter fit ( data = None ) # Output shape: (space_dimension, n_modes) Parameters: Name Type Description Default data ndarray (Default value = None) None Source code in simulai/rom/_rom.py 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . 
singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy project ( data = None ) # Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) reconstruct ( projected_data = None ) # Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . 
modes [ slice ( 0 , n_modes_used )]) restore ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) ) save ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 501 502 503 504 505 506 507 508 509 510 511 512 513 514 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) GPOD # Bases: ROM Source code in simulai/rom/_rom.py 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 class GPOD ( ROM ): def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . 
mask_array = None # It gets the positions related to the n maximum and n minimum values to be used # to locate sensors def _extrema ( self ): locations = list () n_modes = self . modes . shape [ 0 ] for mode_i in range ( n_modes ): n_sensors = self . sensors_distribution [ mode_i ] n_minimum = n_maximum = int ( n_sensors / 2 ) locations += self . modes [ mode_i ] . argsort ()[: n_minimum ] . tolist () locations += self . modes [ mode_i ] . argsort ()[ - n_maximum :] . tolist () return locations # The m dot product (a, b)_m = (m*a, m*b), in which m is a mask array def m_dot ( self , a , b , mask_array = None ): return ( mask_array * a ) . dot (( mask_array * b ) . T ) def fit ( self , data = None ): self . pca . fit ( data = data ) self . modes = self . pca . modes n_features = self . modes . shape [ 1 ] sensors_locations = self . placer () mask_array = np . zeros (( 1 , n_features )) mask_array [:, sensors_locations ] = 1 self . mask_array = mask_array self . M = self . m_dot ( self . modes , self . modes , mask_array = mask_array ) self . M_inv = np . linalg . inv ( self . M ) print ( f \"The condition number for the matrix M is { np . linalg . cond ( self . M ) } \" ) def project ( self , data = None ): data_til = self . mask_array * data f = self . m_dot ( data_til , self . modes , mask_array = self . mask_array ) return f @ self . M_inv . T def reconstruct ( self , projected_data = None ): return self . pca . reconstruct ( projected_data = projected_data ) __init__ ( pca_type = 'pod' , pca_config = None , config = None ) # GPOD Parameters: Name Type Description Default pca_type str the kind of PCA to be used (Default value = \"pod\") 'pod' pca_config (Default value = None) None config (Default value = None) None Source code in simulai/rom/_rom.py 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . 
mask_array = None HOSVD # Bases: ROM High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. Source code in simulai/rom/_rom.py 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 class HOSVD ( ROM ): \"\"\"High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. \"\"\" name = \"hosvd\" def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" ) def _configure_SVD ( self ) -> Union [ List [ TruncatedSVD ], List [ ParallelSVD ]]: if self . engine == \"sklearn\" : return [ TruncatedSVD ( n_components = n ) for n in self . n_components ] elif self . engine == \"dask\" : return [ ParallelSVD ( n_components = n ) for n in self . n_components ] else : raise Exception ( f \"The engine { self . engine } is not supported, it must be in ['sklearn', 'dask'].\" ) def _set_components ( self ) -> None : for j , name in enumerate ( self . components_names ): setattr ( self , name . upper () + self . _comp_tag , self . U_list [ j ]) def _k_svd ( self , data : np . ndarray = None , k : int = None ) -> Union [ np . ndarray , da . core . 
Array ]: \"\"\"SVD applied to the k-mode flattening Args: data (np.ndarray, optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: Left eigenvectors matrix U \"\"\" self . svd_classes [ k ] . fit ( data ) if self . engine == \"sklearn\" : s = self . svd_classes [ k ] . singular_values_ * np . eye ( self . n_components [ k ]) VT = self . svd_classes [ k ] . components_ SVT = s @ VT U = ( np . linalg . pinv ( SVT . T ) @ data . T ) . T else : U = getattr ( self . svd_classes [ k ], \"U\" ) return U def _k_flattening ( self , data : Union [ np . ndarray , da . core . Array ] = None , k : int = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"k-mode flattening Args: data (Union[np.ndarray, da.core.Array], optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: reshaped array of shape (n_1, n_2*n_3*...*n_n) \"\"\" sizelist = copy . deepcopy ( self . sizelist ) sizelist_collapsible = copy . deepcopy ( sizelist ) sizelist [ 0 ] = k sizelist [ k ] = 0 sizelist_collapsible . pop ( k ) collapsible_dims = np . prod ([ self . shape [ s ] for s in sizelist_collapsible ]) if isinstance ( data , da . core . Array ): return data . transpose ( sizelist ) . reshape ( ( - 1 , collapsible_dims ), limit = self . limit ) else : return data . transpose ( sizelist ) . reshape ( - 1 , collapsible_dims ) def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . _set_components () def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . 
index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . transpose () # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) __init__ ( n_components = None , components_names = None , engine = 'sklearn' , limit = '1 GiB' ) # Parameters: Name Type Description Default n_components List [ int ] list with the number of components for each direction (Default value = None) None components_names List [ str ] (Default value = None) None engine str (Default value = \"sklearn\") 'sklearn' limit str (Default value = \"1 GiB\") '1 GiB' Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" ) fit ( data = None ) # Executing High-Order SVD Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 def fit ( self , data : Union [ np . ndarray , da . core . 
Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . _set_components () project ( data = None ) # Projecting using the SVD basis Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description Union [ ndarray , Array ] np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') Source code in simulai/rom/_rom.py 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S reconstruct ( data = None , replace_components = None ) # Reconstruction using the pre-existent basis Parameters: Name Type Description Default data Union [ ndarray , Array ] reduced array of shape (n_1', n_2', ..., None n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: Type Description Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] n_n) Source code in simulai/rom/_rom.py 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . 
transpose () save ( save_path = None , model_name = None ) # Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) ParallelSVD # Bases: ROM Source code in simulai/rom/_rom.py 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 class ParallelSVD ( ROM ): name = \"parallel_svd\" def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . V = None def _chunk_size_condition ( self , size : int , chunk_size : int ) -> int : if size // chunk_size == 0 : return size else : return size // chunk_size def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: if self . chunks == None : chunks = [ self . _chunk_size_condition ( size , self . default_chunks_numbers [ j ]) for j , size in enumerate ( data . shape ) ] else : chunks = self . chunks if isinstance ( data , np . ndarray ): parallel_data = da . from_array ( data , chunks = chunks ) else : parallel_data = data U , s , V = da . linalg . svd_compressed ( parallel_data , k = self . n_components ) self . U = U self . s = s self . V = V __init__ ( n_components = None , chunks = None ) # Executing SVD using dask Parameters: Name Type Description Default n_components int (Default value = None) None chunks Tuple [ int ] (Default value = None) None Source code in simulai/rom/_rom.py 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . 
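A minimal usage sketch for the ParallelSVD class shown above, assuming it is importable from simulai.rom as the page's module path suggests; the snapshot matrix, chunk sizes and number of components are illustrative only. The class wraps dask's compressed SVD and exposes the factors as the attributes U, s and V.

import numpy as np
from simulai.rom import ParallelSVD

data = np.random.rand(1000, 200)                   # synthetic snapshot matrix
svd = ParallelSVD(n_components=10, chunks=(100, 200))
svd.fit(data=data)                                 # numpy input is converted to a chunked dask array internally
U = svd.U.compute()                                # left singular vectors, shape (1000, 10)
s = svd.s.compute()                                # singular values, shape (10,)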
V = None QQM # Source code in simulai/rom/_rom.py 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 class QQM : def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"It executes a Kronecker dot between two arrays Args: a (np.ndarray, optional): left array (Default value = None) b (np.ndarray, optional): right (transposed) array (Default value = None) Returns: np.ndarray: the Kronecker output array \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Each batch in W has n_inputs*(n_inputs + 1)/2 columns def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . _kronecker_product ( a = data , b = data ) def fit ( self , input_data : np . 
ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" ) def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . valid_indices ] def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . V_bar def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) W_transform ( data = None ) # W_transform simply applied Kronecker product for data itself Parameters: Name Type Description Default data ndarray the data to be W-transformed (Default value = None) None Returns: Type Description ndarray np.ndarray: the Kronecker product between data and data ndarray np.ndarray: the Kronecker product between data and data ndarray itself Source code in simulai/rom/_rom.py 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . 
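To make the W_transform above concrete, the following numpy-only sketch (independent of simulai, with an arbitrary small batch) reproduces the batched Kronecker product and the upper-triangular selection, so that each row keeps n_inputs*(n_inputs + 1)/2 quadratic features.

import numpy as np

n_inputs = 3
i_u, j_u = np.triu_indices(n_inputs)          # 6 = n*(n+1)/2 index pairs for n = 3
x = np.random.rand(5, n_inputs)               # a small batch of latent states
kron = np.einsum("bi,bj->bij", x, x)          # batched outer products, shape (5, 3, 3)
w = kron[:, i_u, j_u]                         # symmetric (upper-triangular) part, shape (5, 6)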
_kronecker_product ( a = data , b = data ) __init__ ( n_inputs = None , alpha_0 = None , sparsity_tol = 1e-15 , lambd = None , epsilon = 1e-10 , use_mean = False ) # It extends and enriches the POD approach by determining a quadratic basis for its residual Parameters: Name Type Description Default n_inputs int number of inputs used in the POD approximation (Default value = None) None alpha_0 float regularization parameter used in SparSA algorithm (Default value = None) None sparsity_tol float sparsity tolerance used in SpaRSA (Default value = 1e-15) 1e-15 lambd float regularization parameter used in SparSA algorithm (Default value = None) None epsilon float threshold for zeroing columns in SpaRSA (Default value = 1e-10) 1e-10 use_mean bool use mean for the SpaRSA loss function of not ? (Default value = False) False Returns: Type Description None nothing Source code in simulai/rom/_rom.py 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , ) eval ( data = None ) # It projects and reconstructs Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description None np.ndarray: the approximated data Source code in simulai/rom/_rom.py 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . 
V_bar fit ( input_data = None , target_data = None , pinv = False ) # It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Parameters: Name Type Description Default input_data ndarray in general, the original latent None series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" ) project ( data = None ) # Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description ndarray np.ndarray: the projection over the selected basis Source code in simulai/rom/_rom.py 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . valid_indices ] save ( save_path = None , model_name = None ) # Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . 
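A minimal end-to-end sketch for the QQM class documented above, assuming synthetic latent and residual series and using the Moore-Penrose path (pinv=True), which bypasses the SpaRSA iterations; the regularization values passed to the constructor are placeholders.

import numpy as np
from simulai.rom import QQM

latent = np.random.rand(100, 4)               # original latent series, n_inputs = 4
residual = np.random.rand(100, 4)             # residual of the linear approximation
qqm = QQM(n_inputs=4, alpha_0=1.0, lambd=1e-3)
qqm.fit(input_data=latent, target_data=residual, pinv=True)   # pseudo-inverse fit of V_bar
selected = qqm.project(data=latent)           # W-transformed data restricted to the selected columns
approx = qqm.eval(data=latent)                # W(latent) @ V_bar, same shape as residual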
dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"Simulai rom"},{"location":"simulai_rom/simulai_rom/#simulairom","text":"","title":"simulai.rom"},{"location":"simulai_rom/simulai_rom/#pod","text":"Bases: ROM It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that Source code in simulai/rom/_rom.py 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class POD ( ROM ): \"\"\"It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that \"\"\" name = \"pod\" def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) def project ( self , data : np . ndarray = None ) -> np . 
ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) )","title":"POD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.__init__","text":"Propor Orthogonal Decomposition Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . 
pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . svd_filter = svd_filter","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.fit","text":"Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy ))","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.project","text":"Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T )","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.reconstruct","text":"Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 def reconstruct ( self , projected_data : np . 
ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )])","title":"reconstruct()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.restore","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 323 324 325 326 327 328 329 330 331 332 333 334 335 336 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) )","title":"restore()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.save","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 309 310 311 312 313 314 315 316 317 318 319 320 321 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean )","title":"save()"},{"location":"simulai_rom/simulai_rom/#ipod","text":"Bases: ROM Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. 
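A minimal batchwise sketch for IPOD, assuming a synthetic data stream split into equally sized batches; each call to fit performs an incremental (partial) update of the basis.

import numpy as np
from simulai.rom import IPOD

batches = [np.random.rand(500, 64) for _ in range(4)]   # synthetic stream of snapshot batches
ipod = IPOD(config={"n_components": 10})
for batch in batches:
    ipod.fit(data=batch)                                 # partial_fit on each batch
latent = ipod.project(data=batches[-1])                  # shape (500, 10)
restored = ipod.reconstruct(projected_data=latent)       # shape (500, 64)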
Source code in simulai/rom/_rom.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 class IPOD ( ROM ): \"\"\"Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. \"\"\" name = \"ipod\" def __init__ ( self , config : dict = None , data_mean : np . ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . 
sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) )","title":"IPOD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.__init__","text":"Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None data_mean ndarray pre-evaluated mean of the dataset (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 def __init__ ( self , config : dict = None , data_mean : np . 
ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.fit","text":"Output shape: (space_dimension, n_modes) Parameters: Name Type Description Default data ndarray (Default value = None) None Source code in simulai/rom/_rom.py 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.project","text":"Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 def project ( self , data : np . ndarray = None ) -> np . 
ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T )","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.reconstruct","text":"Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )])","title":"reconstruct()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.restore","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . 
join ( save_path , model_name + \".npz\" ) )","title":"restore()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.save","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 501 502 503 504 505 506 507 508 509 510 511 512 513 514 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean )","title":"save()"},{"location":"simulai_rom/simulai_rom/#gpod","text":"Bases: ROM Source code in simulai/rom/_rom.py 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 class GPOD ( ROM ): def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . mask_array = None # It gets the positions related to the n maximum and n minimum values to be used # to locate sensors def _extrema ( self ): locations = list () n_modes = self . modes . shape [ 0 ] for mode_i in range ( n_modes ): n_sensors = self . sensors_distribution [ mode_i ] n_minimum = n_maximum = int ( n_sensors / 2 ) locations += self . modes [ mode_i ] . argsort ()[: n_minimum ] . tolist () locations += self . modes [ mode_i ] . argsort ()[ - n_maximum :] . tolist () return locations # The m dot product (a, b)_m = (m*a, m*b), in which m is a mask array def m_dot ( self , a , b , mask_array = None ): return ( mask_array * a ) . dot (( mask_array * b ) . 
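A minimal usage sketch for the GPOD class above, with an illustrative snapshot matrix, two POD modes and four sensors per mode; sensors_placer is left unset so the default extrema placement is applied.

import numpy as np
from simulai.rom import GPOD

data = np.random.rand(200, 64)                           # snapshots (n_samples, n_features)
gpod = GPOD(pca_type="pod",
            pca_config={"n_components": 2},
            config={"sensors_distribution": [4, 4]})     # an even number of sensors per mode
gpod.fit(data=data)                                      # builds the POD basis, the sensor mask and the matrix M
coeffs = gpod.project(data=data)                         # coefficients recovered from the sensor locations only
restored = gpod.reconstruct(projected_data=coeffs)       # full-field reconstruction via the POD basis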
T ) def fit ( self , data = None ): self . pca . fit ( data = data ) self . modes = self . pca . modes n_features = self . modes . shape [ 1 ] sensors_locations = self . placer () mask_array = np . zeros (( 1 , n_features )) mask_array [:, sensors_locations ] = 1 self . mask_array = mask_array self . M = self . m_dot ( self . modes , self . modes , mask_array = mask_array ) self . M_inv = np . linalg . inv ( self . M ) print ( f \"The condition number for the matrix M is { np . linalg . cond ( self . M ) } \" ) def project ( self , data = None ): data_til = self . mask_array * data f = self . m_dot ( data_til , self . modes , mask_array = self . mask_array ) return f @ self . M_inv . T def reconstruct ( self , projected_data = None ): return self . pca . reconstruct ( projected_data = projected_data )","title":"GPOD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.GPOD.__init__","text":"GPOD Parameters: Name Type Description Default pca_type str the kind of PCA to be used (Default value = \"pod\") 'pod' pca_config (Default value = None) None config (Default value = None) None Source code in simulai/rom/_rom.py 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . mask_array = None","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#hosvd","text":"Bases: ROM High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. 
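A minimal usage sketch for HOSVD, assuming a small synthetic third-order array and illustrative truncation sizes (one per direction); fit builds one truncated SVD per dimension, project contracts the data against the factor matrices, and reconstruct inverts the operation.

import numpy as np
from simulai.rom import HOSVD

data = np.random.rand(20, 30, 40)                        # array of shape (n_1, n_2, n_3)
hosvd = HOSVD(n_components=[5, 6, 7], engine="sklearn")
hosvd.fit(data=data)                                     # one SVD per dimension, stores U_list and the core tensor S
reduced = hosvd.project(data=data)                       # reduced array of shape (5, 6, 7)
restored = hosvd.reconstruct(data=reduced)               # back to shape (20, 30, 40)
hosvd.save(save_path="/tmp", model_name="hosvd_model")   # writes /tmp/hosvd_model.pkl (path is illustrative)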
Source code in simulai/rom/_rom.py 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 class HOSVD ( ROM ): \"\"\"High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. \"\"\" name = \"hosvd\" def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" ) def _configure_SVD ( self ) -> Union [ List [ TruncatedSVD ], List [ ParallelSVD ]]: if self . engine == \"sklearn\" : return [ TruncatedSVD ( n_components = n ) for n in self . n_components ] elif self . engine == \"dask\" : return [ ParallelSVD ( n_components = n ) for n in self . n_components ] else : raise Exception ( f \"The engine { self . engine } is not supported, it must be in ['sklearn', 'dask'].\" ) def _set_components ( self ) -> None : for j , name in enumerate ( self . components_names ): setattr ( self , name . upper () + self . _comp_tag , self . U_list [ j ]) def _k_svd ( self , data : np . ndarray = None , k : int = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"SVD applied to the k-mode flattening Args: data (np.ndarray, optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: Left eigenvectors matrix U \"\"\" self . svd_classes [ k ] . fit ( data ) if self . 
engine == \"sklearn\" : s = self . svd_classes [ k ] . singular_values_ * np . eye ( self . n_components [ k ]) VT = self . svd_classes [ k ] . components_ SVT = s @ VT U = ( np . linalg . pinv ( SVT . T ) @ data . T ) . T else : U = getattr ( self . svd_classes [ k ], \"U\" ) return U def _k_flattening ( self , data : Union [ np . ndarray , da . core . Array ] = None , k : int = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"k-mode flattening Args: data (Union[np.ndarray, da.core.Array], optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: reshaped array of shape (n_1, n_2*n_3*...*n_n) \"\"\" sizelist = copy . deepcopy ( self . sizelist ) sizelist_collapsible = copy . deepcopy ( sizelist ) sizelist [ 0 ] = k sizelist [ k ] = 0 sizelist_collapsible . pop ( k ) collapsible_dims = np . prod ([ self . shape [ s ] for s in sizelist_collapsible ]) if isinstance ( data , da . core . Array ): return data . transpose ( sizelist ) . reshape ( ( - 1 , collapsible_dims ), limit = self . limit ) else : return data . transpose ( sizelist ) . reshape ( - 1 , collapsible_dims ) def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . _set_components () def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . 
tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . transpose () # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"HOSVD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.__init__","text":"Parameters: Name Type Description Default n_components List [ int ] list with the number of components for each direction (Default value = None) None components_names List [ str ] (Default value = None) None engine str (Default value = \"sklearn\") 'sklearn' limit str (Default value = \"1 GiB\") '1 GiB' Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" )","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.fit","text":"Executing High-Order SVD Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . 
shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . _set_components ()","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.project","text":"Projecting using the SVD basis Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description Union [ ndarray , Array ] np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') Source code in simulai/rom/_rom.py 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.reconstruct","text":"Reconstruction using the pre-existent basis Parameters: Name Type Description Default data Union [ ndarray , Array ] reduced array of shape (n_1', n_2', ..., None n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: Type Description Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] n_n) Source code in simulai/rom/_rom.py 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . 
transpose ()","title":"reconstruct()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.save","text":"Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"save()"},{"location":"simulai_rom/simulai_rom/#parallelsvd","text":"Bases: ROM Source code in simulai/rom/_rom.py 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 class ParallelSVD ( ROM ): name = \"parallel_svd\" def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . V = None def _chunk_size_condition ( self , size : int , chunk_size : int ) -> int : if size // chunk_size == 0 : return size else : return size // chunk_size def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: if self . chunks == None : chunks = [ self . _chunk_size_condition ( size , self . default_chunks_numbers [ j ]) for j , size in enumerate ( data . shape ) ] else : chunks = self . chunks if isinstance ( data , np . ndarray ): parallel_data = da . from_array ( data , chunks = chunks ) else : parallel_data = data U , s , V = da . linalg . svd_compressed ( parallel_data , k = self . n_components ) self . U = U self . s = s self . V = V","title":"ParallelSVD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.ParallelSVD.__init__","text":"Executing SVD using dask Parameters: Name Type Description Default n_components int (Default value = None) None chunks Tuple [ int ] (Default value = None) None Source code in simulai/rom/_rom.py 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . 
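The HOSVD workflow documented above (fit, project, reconstruct, save) can be exercised end to end on a small tensor. The sketch below is illustrative only: it assumes the class is importable as simulai.rom.HOSVD (the source shown lives in simulai/rom/_rom.py), that scikit-learn is installed for the default engine="sklearn", and the shapes and truncation ranks are arbitrary.

```python
import numpy as np
from simulai.rom import HOSVD  # assumed import path

# Small third-order dataset, e.g. (time, x, y)
data = np.random.rand(100, 32, 16)

# One truncation rank per tensor mode
rom = HOSVD(n_components=[10, 8, 4],
            components_names=["time", "x", "y"],
            engine="sklearn")

rom.fit(data=data)                        # one SVD basis per mode, stored in rom.U_list
reduced = rom.project(data=data)          # expected shape: (10, 8, 4)
restored = rom.reconstruct(data=reduced)  # expected shape: (100, 32, 16)

print(reduced.shape, restored.shape)
```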
V = None","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#qqm","text":"Source code in simulai/rom/_rom.py 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 class QQM : def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"It executes a Kronecker dot between two arrays Args: a (np.ndarray, optional): left array (Default value = None) b (np.ndarray, optional): right (transposed) array (Default value = None) Returns: np.ndarray: the Kronecker output array \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Each batch in W has n_inputs*(n_inputs + 1)/2 columns def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . 
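For larger matrices, the ParallelSVD class shown here delegates to dask's compressed SVD. A minimal sketch, assuming the class is exposed as simulai.rom.ParallelSVD and that dask is installed; the array size and chunk sizes are placeholders.

```python
import numpy as np
from simulai.rom import ParallelSVD  # assumed import path

data = np.random.rand(2_000, 500)

svd = ParallelSVD(n_components=20, chunks=(200, 500))
svd.fit(data=data)  # wraps dask.array.linalg.svd_compressed

# U, s and V are lazy dask arrays; materialize them on demand
U = svd.U.compute()
print(U.shape)  # (2000, 20)
```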
_kronecker_product ( a = data , b = data ) def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" ) def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . valid_indices ] def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . V_bar def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"QQM"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.W_transform","text":"W_transform simply applied Kronecker product for data itself Parameters: Name Type Description Default data ndarray the data to be W-transformed (Default value = None) None Returns: Type Description ndarray np.ndarray: the Kronecker product between data and data ndarray np.ndarray: the Kronecker product between data and data ndarray itself Source code in simulai/rom/_rom.py 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . 
_kronecker_product ( a = data , b = data )","title":"W_transform()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.__init__","text":"It extends and enriches the POD approach by determining a quadratic basis for its residual Parameters: Name Type Description Default n_inputs int number of inputs used in the POD approximation (Default value = None) None alpha_0 float regularization parameter used in SparSA algorithm (Default value = None) None sparsity_tol float sparsity tolerance used in SpaRSA (Default value = 1e-15) 1e-15 lambd float regularization parameter used in SparSA algorithm (Default value = None) None epsilon float threshold for zeroing columns in SpaRSA (Default value = 1e-10) 1e-10 use_mean bool use mean for the SpaRSA loss function of not ? (Default value = False) False Returns: Type Description None nothing Source code in simulai/rom/_rom.py 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , )","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.eval","text":"It projects and reconstructs Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description None np.ndarray: the approximated data Source code in simulai/rom/_rom.py 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . 
V_bar","title":"eval()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.fit","text":"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Parameters: Name Type Description Default input_data ndarray in general, the original latent None series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" )","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.project","text":"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description ndarray np.ndarray: the projection over the selected basis Source code in simulai/rom/_rom.py 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . 
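A compact way to see QQM in action is to fit the quadratic correction with the pseudoinverse path (pinv=True), which bypasses the SpaRSA solver. The sketch below is illustrative: the import path, the regularization values and the random data are assumptions, not recommendations.

```python
import numpy as np
from simulai.rom import QQM  # assumed import path

n_modes = 5
latent = np.random.rand(200, n_modes)    # e.g. a POD latent series
residual = np.random.rand(200, n_modes)  # e.g. the residual of the linear approximation

# alpha_0 and lambd are placeholder values; they are only consumed by SpaRSA
qqm = QQM(n_inputs=n_modes, alpha_0=1e-3, lambd=1e-3)
qqm.fit(input_data=latent, target_data=residual, pinv=True)

# eval applies the quadratic (Kronecker) basis and the fitted coefficients V_bar
correction = qqm.eval(data=latent)
print(correction.shape)  # (200, 5)
```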
valid_indices ]","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.save","text":"Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"save()"}]} \ No newline at end of file +{"config":{"indexing":"full","lang":["en"],"min_search_length":3,"prebuild_index":false,"separator":"[\\s\\-]+"},"docs":[{"location":"","text":"red { color: red } Welcome to SIMUL ai # An extensible Python package with data-driven pipelines for physics-informed machine learning. The SimulAI toolkit provides easy access to state-of-the-art models and algorithms for physics-informed machine learning. Currently, it includes the following methods described in the literature: Physics-Informed Neural Networks (PINNs) Deep Operator Networks (DeepONets) Variational Encoder-Decoders (VED) Operator Inference (OpInf) Koopman Autoencoders (experimental) Echo State Networks (experimental GPU support) Transformers U-Nets In addition to the methods above, many more techniques for model reduction and regularization are included in SimulAI. See documentation . Installing # Python version requirements: 3.8 <= python <= 3.11 Using pip # For installing the most recent stable version from PyPI: pip install simulai-toolkit For installing from the latest commit sent to GitHub (just for testing and developing purposes): pip uninstall simulai_toolkit pip install -U git+https://github.com/IBM/simulai@$(git ls-remote git@github.com:IBM/simulai.git | head -1 | awk '{print $1;}')#egg=simulai_toolkit Contributing code to SimulAI # If you are interested in directly contributing to this project, please see CONTRIBUTING . Using MPI # Some methods implemented on SimulAI support multiprocessing with MPI. In order to use it, you will need a valid MPI distribution, e.g. MPICH, OpenMPI. As an example, you can use conda to install MPICH as follows: conda install -c conda-forge mpich gcc Issues with macOS # If you have problems installing gcc using the command above, we recommend you to install it using Homebrew . Using Tensorboard # Tensorboard is supported for monitoring neural network training tasks. For a tutorial about how to set it see this example . Documentation # Please, refer to the SimulAI API documentation before using the toolkit. Examples # Additionally, you can refer to examples in the respective folder . License # This software is licensed under Apache license 2.0. See LICENSE . Contributing code to SimulAI # If you are interested in directly contributing to this project, please see CONTRIBUTING . 
How to cite SimulAI in your publications # If you find SimulAI to be useful, please consider citing it in your published work: @misc{simulai, author = {IBM}, title = {SimulAI Toolkit}, subtitle = {A Python package with data-driven pipelines for physics-informed machine learning}, note = \"https://github.com/IBM/simulai\", doi = {10.5281/zenodo.7351516}, year = {2022}, } or, via Zenodo: @software{joao_lucas_de_sousa_almeida_2023_7566603, author = {Jo\u00e3o Lucas de Sousa Almeida and Leonardo Martins and Tar\u0131k Kaan Ko\u00e7}, title = {IBM/simulai: 0.99.13}, month = jan, year = 2023, publisher = {Zenodo}, version = {0.99.25}, doi = {10.5281/zenodo.7566603}, url = {https://doi.org/10.5281/zenodo.7566603} } Publications # Jo\u00e3o Lucas de Sousa Almeida, Pedro Roberto Barbosa Rocha, Allan Moreira de Carvalho and Alberto Costa Nogueira Jr. A coupled Variational Encoder-Decoder - DeepONet surrogate model for the Rayleigh-B\u00e9nard convection problem. In When Machine Learning meets Dynamical Systems: Theory and Applications, AAAI, 2023. Jo\u00e3o Lucas S. Almeida, Arthur C. Pires, Klaus F. V. Cid, and Alberto C. Nogueira Jr. Non-intrusive operator inference for chaotic systems. IEEE Transactions on Artificial Intelligence, pages 1--14, 2022. Pedro Roberto Barbosa Rocha, Marcos Sebasti\u00e3o de Paula Gomes, Allan Moreira de Carvalho, Jo\u00e3o Lucas de Sousa Almeida and Alberto Costa Nogueira Jr. Data-driven reduced-order model for atmospheric CO2 dispersion. In AAAI 2022 Fall Symposium: The Role of AI in Responding to Climate Challenges, 2022. Pedro Roberto Barbosa Rocha, Jo\u00e3o Lucas de Sousa Almeida, Marcos Sebasti\u00e3o de Paula Gomes, Alberto Costa Nogueira, Reduced-order modeling of the two-dimensional Rayleigh--B\u00e9nard convection flow through a non-intrusive operator inference, Engineering Applications of Artificial Intelligence, Volume 126, Part B, 2023, 106923, ISSN 0952-1976, https://doi.org/10.1016/j.engappai.2023.106923 . ( https://www.sciencedirect.com/science/article/pii/S0952197623011077 ) References # Jaeger, H., Haas, H. (2004). \\\"Harnessing Nonlinearity: Predicting Chaotic Systems and Saving Energy in Wireless Communication,\\\" Science , 304 (5667): 78--80. \\< https://doi.org/10.1126/science.1091277 >`_. Lu, L., Jin, P., Pang, G., Zhang, Z., Karniadakis, G. E. (2021). \\\"Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators,\\\" Nature Machine Intelligence , 3 (1): 218--229. ISSN: 2522-5839. \\< https://doi.org/10.1038/s42256-021-00302-5 >`_. Eivazi, H., Le Clainche, S., Hoyas, S., Vinuesa, R. (2022) \\\"Towards extraction of orthogonal and parsimonious non-linear modes from turbulent flows\\\" Expert Systems with Applications , 202 . ISSN: 0957-4174. \\< https://doi.org/10.1016/j.eswa.2022.117038 >`_. Raissi, M., Perdikaris, P., Karniadakis, G. E. (2019). \\\"Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations,\\\" Journal of Computational Physics , 378 (1): 686-707. ISSN: 0021-9991. \\< https://doi.org/10.1016/j.jcp.2018.10.045 >`_. Lusch, B., Kutz, J. N., Brunton, S.L. (2018). \\\"Deep learning for universal linear embeddings of nonlinear dynamics,\\\" Nature Communications , 9 : 4950. ISSN: 2041-1723. \\< https://doi.org/10.1038/s41467-018-07210-0 >`_. McQuarrie, S., Huang, C. and Willcox, K. (2021). 
\\\"Data-driven reduced-order models via regularized operator inference for a single-injector combustion process,\\\" Journal of the Royal Society of New Zealand , 51 (2): 194-211. ISSN: 0303-6758. \\< https://doi.org/10.1080/03036758.2020.1863237 >`_.","title":"Home"},{"location":"#welcome-to-simulai","text":"An extensible Python package with data-driven pipelines for physics-informed machine learning. The SimulAI toolkit provides easy access to state-of-the-art models and algorithms for physics-informed machine learning. Currently, it includes the following methods described in the literature: Physics-Informed Neural Networks (PINNs) Deep Operator Networks (DeepONets) Variational Encoder-Decoders (VED) Operator Inference (OpInf) Koopman Autoencoders (experimental) Echo State Networks (experimental GPU support) Transformers U-Nets In addition to the methods above, many more techniques for model reduction and regularization are included in SimulAI. See documentation .","title":"Welcome to SIMULai"},{"location":"#installing","text":"Python version requirements: 3.8 <= python <= 3.11","title":"Installing"},{"location":"#using-pip","text":"For installing the most recent stable version from PyPI: pip install simulai-toolkit For installing from the latest commit sent to GitHub (just for testing and developing purposes): pip uninstall simulai_toolkit pip install -U git+https://github.com/IBM/simulai@$(git ls-remote git@github.com:IBM/simulai.git | head -1 | awk '{print $1;}')#egg=simulai_toolkit","title":"Using pip"},{"location":"#contributing-code-to-simulai","text":"If you are interested in directly contributing to this project, please see CONTRIBUTING .","title":"Contributing code to SimulAI"},{"location":"#using-mpi","text":"Some methods implemented on SimulAI support multiprocessing with MPI. In order to use it, you will need a valid MPI distribution, e.g. MPICH, OpenMPI. As an example, you can use conda to install MPICH as follows: conda install -c conda-forge mpich gcc","title":"Using MPI"},{"location":"#issues-with-macos","text":"If you have problems installing gcc using the command above, we recommend you to install it using Homebrew .","title":"Issues with macOS"},{"location":"#using-tensorboard","text":"Tensorboard is supported for monitoring neural network training tasks. For a tutorial about how to set it see this example .","title":"Using Tensorboard"},{"location":"#documentation","text":"Please, refer to the SimulAI API documentation before using the toolkit.","title":"Documentation"},{"location":"#examples","text":"Additionally, you can refer to examples in the respective folder .","title":"Examples"},{"location":"#license","text":"This software is licensed under Apache license 2.0. 
See LICENSE .","title":"License"},{"location":"#contributing-code-to-simulai_1","text":"If you are interested in directly contributing to this project, please see CONTRIBUTING .","title":"Contributing code to SimulAI"},{"location":"#how-to-cite-simulai-in-your-publications","text":"If you find SimulAI to be useful, please consider citing it in your published work: @misc{simulai, author = {IBM}, title = {SimulAI Toolkit}, subtitle = {A Python package with data-driven pipelines for physics-informed machine learning}, note = \"https://github.com/IBM/simulai\", doi = {10.5281/zenodo.7351516}, year = {2022}, } or, via Zenodo: @software{joao_lucas_de_sousa_almeida_2023_7566603, author = {Jo\u00e3o Lucas de Sousa Almeida and Leonardo Martins and Tar\u0131k Kaan Ko\u00e7}, title = {IBM/simulai: 0.99.13}, month = jan, year = 2023, publisher = {Zenodo}, version = {0.99.25}, doi = {10.5281/zenodo.7566603}, url = {https://doi.org/10.5281/zenodo.7566603} }","title":"How to cite SimulAI in your publications"},{"location":"#publications","text":"Jo\u00e3o Lucas de Sousa Almeida, Pedro Roberto Barbosa Rocha, Allan Moreira de Carvalho and Alberto Costa Nogueira Jr. A coupled Variational Encoder-Decoder - DeepONet surrogate model for the Rayleigh-B\u00e9nard convection problem. In When Machine Learning meets Dynamical Systems: Theory and Applications, AAAI, 2023. Jo\u00e3o Lucas S. Almeida, Arthur C. Pires, Klaus F. V. Cid, and Alberto C. Nogueira Jr. Non-intrusive operator inference for chaotic systems. IEEE Transactions on Artificial Intelligence, pages 1--14, 2022. Pedro Roberto Barbosa Rocha, Marcos Sebasti\u00e3o de Paula Gomes, Allan Moreira de Carvalho, Jo\u00e3o Lucas de Sousa Almeida and Alberto Costa Nogueira Jr. Data-driven reduced-order model for atmospheric CO2 dispersion. In AAAI 2022 Fall Symposium: The Role of AI in Responding to Climate Challenges, 2022. Pedro Roberto Barbosa Rocha, Jo\u00e3o Lucas de Sousa Almeida, Marcos Sebasti\u00e3o de Paula Gomes, Alberto Costa Nogueira, Reduced-order modeling of the two-dimensional Rayleigh--B\u00e9nard convection flow through a non-intrusive operator inference, Engineering Applications of Artificial Intelligence, Volume 126, Part B, 2023, 106923, ISSN 0952-1976, https://doi.org/10.1016/j.engappai.2023.106923 . ( https://www.sciencedirect.com/science/article/pii/S0952197623011077 )","title":"Publications"},{"location":"#references","text":"Jaeger, H., Haas, H. (2004). \\\"Harnessing Nonlinearity: Predicting Chaotic Systems and Saving Energy in Wireless Communication,\\\" Science , 304 (5667): 78--80. \\< https://doi.org/10.1126/science.1091277 >`_. Lu, L., Jin, P., Pang, G., Zhang, Z., Karniadakis, G. E. (2021). \\\"Learning nonlinear operators via DeepONet based on the universal approximation theorem of operators,\\\" Nature Machine Intelligence , 3 (1): 218--229. ISSN: 2522-5839. \\< https://doi.org/10.1038/s42256-021-00302-5 >`_. Eivazi, H., Le Clainche, S., Hoyas, S., Vinuesa, R. (2022) \\\"Towards extraction of orthogonal and parsimonious non-linear modes from turbulent flows\\\" Expert Systems with Applications , 202 . ISSN: 0957-4174. \\< https://doi.org/10.1016/j.eswa.2022.117038 >`_. Raissi, M., Perdikaris, P., Karniadakis, G. E. (2019). \\\"Physics-informed neural networks: A deep learning framework for solving forward and inverse problems involving nonlinear partial differential equations,\\\" Journal of Computational Physics , 378 (1): 686-707. ISSN: 0021-9991. \\< https://doi.org/10.1016/j.jcp.2018.10.045 >`_. Lusch, B., Kutz, J. 
N., Brunton, S.L. (2018). \\\"Deep learning for universal linear embeddings of nonlinear dynamics,\\\" Nature Communications , 9 : 4950. ISSN: 2041-1723. \\< https://doi.org/10.1038/s41467-018-07210-0 >`_. McQuarrie, S., Huang, C. and Willcox, K. (2021). \\\"Data-driven reduced-order models via regularized operator inference for a single-injector combustion process,\\\" Journal of the Royal Society of New Zealand , 51 (2): 194-211. ISSN: 0303-6758. \\< https://doi.org/10.1080/03036758.2020.1863237 >`_.","title":"References"},{"location":"simulai_activations/","text":"red { color: red } Activations # Siren # Bases: Module Sinusoidal Representation Networks (SIREN) Source code in simulai/activations.py 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 class Siren ( torch . nn . Module ): \"\"\"Sinusoidal Representation Networks (SIREN)\"\"\" name = \"Siren\" def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . c = c @property def share_to_host ( self ) -> dict : \"\"\"Return the parameters of the SIREN model. Returns: dict: A dictionary containing the parameters 'omega_0' and 'c'. \"\"\" return { \"omega_0\" : self . omega_0 , \"c\" : self . c } def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. \"\"\" return torch . sin ( self . omega_0 * input ) share_to_host : dict property # Return the parameters of the SIREN model. Returns: Name Type Description dict dict A dictionary containing the parameters 'omega_0' and 'c'. __init__ ( omega_0 = None , c = None ) # Initialize SIREN model with given parameters. Parameters: Name Type Description Default omega_0 float (Default value = None) None c float (Default value = None) None Source code in simulai/activations.py 23 24 25 26 27 28 29 30 31 32 33 34 def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . c = c forward ( input ) # Perform the forward pass of the SIREN model on the input. Parameters: Name Type Description Default input Tensor The input to the SIREN model. required Returns: Type Description Tensor torch.Tensor: The output of the SIREN model. Source code in simulai/activations.py 47 48 49 50 51 52 53 54 55 56 57 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. \"\"\" return torch . sin ( self . omega_0 * input ) sin # Bases: Module Sine activation function. This module applies the sine function element-wise to the input. Source code in simulai/activations.py 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 class sin ( torch . nn . Module ): \"\"\"Sine activation function. This module applies the sine function element-wise to the input. 
\"\"\" name = \"sin\" def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . __init__ () def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input ) __init__ () # Initialize the sine activation function. Source code in simulai/activations.py 69 70 71 72 73 74 def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . __init__ () forward ( input ) # Perform the forward pass of the sine activation function on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 76 77 78 79 80 81 82 83 84 85 86 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input ) Wavelet # Bases: Module Wavelet activation Source code in simulai/activations.py 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 class Wavelet ( torch . nn . Module ): \"\"\"Wavelet activation\"\"\" name = \"wavelet\" def __init__ ( self ) -> None : super ( Wavelet , self ) . __init__ () self . w1 = torch . nn . Parameter ( torch . ones ( 1 ), requires_grad = True ) self . w2 = torch . nn . Parameter ( torch . ones ( 1 ), requires_grad = True ) def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . cos ( input ) forward ( input ) # Perform the forward pass of the Wavelet activation on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 100 101 102 103 104 105 106 107 108 109 110 111 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . cos ( input )","title":"Simulai activations"},{"location":"simulai_activations/#activations","text":"","title":"Activations"},{"location":"simulai_activations/#siren","text":"Bases: Module Sinusoidal Representation Networks (SIREN) Source code in simulai/activations.py 18 19 20 21 22 23 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 class Siren ( torch . nn . Module ): \"\"\"Sinusoidal Representation Networks (SIREN)\"\"\" name = \"Siren\" def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. 
Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . c = c @property def share_to_host ( self ) -> dict : \"\"\"Return the parameters of the SIREN model. Returns: dict: A dictionary containing the parameters 'omega_0' and 'c'. \"\"\" return { \"omega_0\" : self . omega_0 , \"c\" : self . c } def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. \"\"\" return torch . sin ( self . omega_0 * input )","title":"Siren"},{"location":"simulai_activations/#simulai.activations.Siren.share_to_host","text":"Return the parameters of the SIREN model. Returns: Name Type Description dict dict A dictionary containing the parameters 'omega_0' and 'c'.","title":"share_to_host"},{"location":"simulai_activations/#simulai.activations.Siren.__init__","text":"Initialize SIREN model with given parameters. Parameters: Name Type Description Default omega_0 float (Default value = None) None c float (Default value = None) None Source code in simulai/activations.py 23 24 25 26 27 28 29 30 31 32 33 34 def __init__ ( self , omega_0 : float = None , c : float = None ) -> None : \"\"\"Initialize SIREN model with given parameters. Args: omega_0 (float, optional): (Default value = None) c (float, optional): (Default value = None) \"\"\" super ( Siren , self ) . __init__ () self . omega_0 = omega_0 self . c = c","title":"__init__()"},{"location":"simulai_activations/#simulai.activations.Siren.forward","text":"Perform the forward pass of the SIREN model on the input. Parameters: Name Type Description Default input Tensor The input to the SIREN model. required Returns: Type Description Tensor torch.Tensor: The output of the SIREN model. Source code in simulai/activations.py 47 48 49 50 51 52 53 54 55 56 57 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the SIREN model on the input. Args: input (torch.Tensor): The input to the SIREN model. Returns: torch.Tensor: The output of the SIREN model. \"\"\" return torch . sin ( self . omega_0 * input )","title":"forward()"},{"location":"simulai_activations/#sin","text":"Bases: Module Sine activation function. This module applies the sine function element-wise to the input. Source code in simulai/activations.py 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 class sin ( torch . nn . Module ): \"\"\"Sine activation function. This module applies the sine function element-wise to the input. \"\"\" name = \"sin\" def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . __init__ () def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input )","title":"sin"},{"location":"simulai_activations/#simulai.activations.sin.__init__","text":"Initialize the sine activation function. Source code in simulai/activations.py 69 70 71 72 73 74 def __init__ ( self ) -> None : \"\"\"Initialize the sine activation function. \"\"\" super ( sin , self ) . 
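Siren and sin carry no trainable parameters, while Wavelet holds two trainable scalars, so all three activation modules documented on this page can be tried directly on a tensor. A minimal sketch, assuming they are importable from simulai.activations:

```python
import torch
from simulai.activations import Siren, Wavelet, sin  # assumed import path

x = torch.linspace(-1.0, 1.0, steps=5)

siren = Siren(omega_0=30.0, c=6.0)  # forward returns sin(omega_0 * x)
plain = sin()                       # forward returns sin(x)
wavelet = Wavelet()                 # forward returns w1*sin(x) + w2*cos(x), with trainable w1, w2

print(siren(x))
print(plain(x))
print(wavelet(x))
print(siren.share_to_host)  # {'omega_0': 30.0, 'c': 6.0}
```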
__init__ ()","title":"__init__()"},{"location":"simulai_activations/#simulai.activations.sin.forward","text":"Perform the forward pass of the sine activation function on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 76 77 78 79 80 81 82 83 84 85 86 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the sine activation function on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return torch . sin ( input )","title":"forward()"},{"location":"simulai_activations/#wavelet","text":"Bases: Module Wavelet activation Source code in simulai/activations.py 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 class Wavelet ( torch . nn . Module ): \"\"\"Wavelet activation\"\"\" name = \"wavelet\" def __init__ ( self ) -> None : super ( Wavelet , self ) . __init__ () self . w1 = torch . nn . Parameter ( torch . ones ( 1 ), requires_grad = True ) self . w2 = torch . nn . Parameter ( torch . ones ( 1 ), requires_grad = True ) def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . cos ( input )","title":"Wavelet"},{"location":"simulai_activations/#simulai.activations.Wavelet.forward","text":"Perform the forward pass of the Wavelet activation on the input. Parameters: Name Type Description Default input Tensor The input to the sine activation function. required Returns: Type Description Tensor torch.Tensor: The output of the sine activation function. Source code in simulai/activations.py 100 101 102 103 104 105 106 107 108 109 110 111 def forward ( self , input : torch . Tensor ) -> torch . Tensor : \"\"\"Perform the forward pass of the Wavelet activation on the input. Args: input (torch.Tensor): The input to the sine activation function. Returns: torch.Tensor: The output of the sine activation function. \"\"\" return self . w1 * torch . sin ( input ) + self . w2 * torch . cos ( input )","title":"forward()"},{"location":"simulai_batching/","text":"red { color: red } Batching operations # BatchwiseSampler # Source code in simulai/batching.py 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 class BatchwiseSampler : def __init__ ( self , dataset : h5py . 
Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . _normalization_bypass # Evaluating the global minimum and maximum for all the # datasets in self.dataset def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list ) def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape ) def _normalization_bypass ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Bypass the normalization. Args: data (np.ndarray, optional): The data to be bypassed. 
(Default value = None) Returns: Same data: \"\"\" return data def _target_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the target data using the provided normalizer. Args: data (np.ndarray, optional): The target data to be normalized. (Default value = None) Returns: Normalized target data.: \"\"\" return self . target_normalizer ( data = data ) def _input_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the input data using the provided normalizer. Args: data (np.ndarray, optional): The input data to be normalized. (Default value = None) Returns: Normalized input data.: \"\"\" return self . input_normalizer ( data = data ) def _transpose_first_channel ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Transpose the first channel of the variables list. Args: variables_list (list, optional): (Default value = None) \"\"\" batch = np . stack ( variables_list , axis =- 1 ) dims = list ( range ( len ( batch . shape ))) dims_t = [ 0 ] + [ dims [ - 1 ]] + dims [ 1 : - 1 ] batch = batch . transpose ( * dims_t ) return torch . from_numpy ( batch . astype ( \"float32\" )) def _simple_stack ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Stack the variables list along the last axis. Args: variables_list (list, optional): The list of variables to be stacked. (Default value = None) Returns: A torch tensor of stacked variables.: \"\"\" batch = np . stack ( variables_list , dim =- 1 ) return torch . from_numpy ( batch . astype ( \"float32\" )) def input_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) def target_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) __init__ ( dataset = None , input_variables = None , target_variables = None , input_normalizer = None , target_normalizer = None , channels_first = None ) # Batchwise sampler for loading samples from disk and apply normalization if needed. 
Parameters: Name Type Description Default dataset Group Dataset object containing the samples (Default value = None) None input_variables List [ str ] List of input variables to be loaded (Default value = None) None target_variables List [ str ] List of target variables to be loaded (Default value = None) None input_normalizer callable Function to be applied on the input variables (Default value = None) None target_normalizer callable Function to be applied on the target variables (Default value = None) None channels_first bool Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) None Source code in simulai/batching.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 def __init__ ( self , dataset : h5py . Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . _normalization_bypass input_data ( indices = None ) # Retrieve the input data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the input data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of input data: Source code in simulai/batching.py 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def input_data ( self , indices : np . ndarray = None ) -> torch . 
Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) input_shape () # Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: Type Description list A list of integers representing the shape of the input variables.: Source code in simulai/batching.py 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape ) minmax ( batch_size = None , data_interval = None ) # Evaluate the minimum and maximum values of all the target variables in the dataset. Parameters: Name Type Description Default batch_size int Number of samples to use in the evaluation (Default value = None) None data_interval list List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) None Returns: Type Description Tuple [ float , float ] A tuple of minimum and maximum value of the target variables.: Source code in simulai/batching.py 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list ) target_data ( indices = None ) # Retrieve the target data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the target data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of target data: Source code in simulai/batching.py 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 def target_data ( self , indices : np . ndarray = None ) -> torch . 
Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) batchdomain_constructor # Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Parameters: Name Type Description Default data_interval list A list of two integers representing the start and end of the data interval. (Default value = None) None batch_size int The desired size of the batches (Default value = None) None batch_indices list A list of indices to be divided into batches. (Default value = None) None Returns: Type Description list A list of lists containing the indices of the input data in the form of batches.: Source code in simulai/batching.py 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 def batchdomain_constructor ( data_interval : list = None , batch_size : int = None , batch_indices : list = None ) -> list : \"\"\"Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Args: data_interval (list, optional): A list of two integers representing the start and end of the data interval. (Default value = None) batch_size (int, optional): The desired size of the batches (Default value = None) batch_indices (list, optional): A list of indices to be divided into batches. (Default value = None) Returns: A list of lists containing the indices of the input data in the form of batches.: \"\"\" if data_interval is not None : interval_size = data_interval [ 1 ] - data_interval [ 0 ] interval = data_interval elif batch_indices is not None : interval_size = len ( batch_indices ) interval = [ batch_indices [ 0 ], batch_indices [ - 1 ]] else : raise Exception ( \"Either data_interval or batch_indices must be provided.\" ) if data_interval is not None : if interval_size < batch_size : batches_ = [ interval [ 0 ], interval [ 1 ]] batches_ = np . array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) residual = interval_size % batch_size batch_size_plus = floor ( residual / n_batches ) batch_size_plus_residual = residual % n_batches batch_size_up = batch_size + batch_size_plus batches_ = ( [ interval [ 0 ]] + [ batch_size_up + 1 ] * batch_size_plus_residual + [ batch_size_up ] * ( n_batches - batch_size_plus_residual ) ) batches_ = np . cumsum ( batches_ ) batches = [ batches_ [ i : i + 2 ] for i in range ( batches_ . shape [ 0 ] - 1 )] else : if interval_size < batch_size : batches_ = batch_indices batches_ = np . array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) batches_ = np . 
array_split ( batch_indices , n_batches , axis = 0 ) batches = [ item . tolist () for item in batches_ ] return batches","title":"Simulai batching"},{"location":"simulai_batching/#batching-operations","text":"","title":"Batching operations"},{"location":"simulai_batching/#batchwisesampler","text":"Source code in simulai/batching.py 24 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 class BatchwiseSampler : def __init__ ( self , dataset : h5py . Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . _normalization_bypass # Evaluating the global minimum and maximum for all the # datasets in self.dataset def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. 
(Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list ) def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape ) def _normalization_bypass ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Bypass the normalization. Args: data (np.ndarray, optional): The data to be bypassed. (Default value = None) Returns: Same data: \"\"\" return data def _target_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the target data using the provided normalizer. Args: data (np.ndarray, optional): The target data to be normalized. (Default value = None) Returns: Normalized target data.: \"\"\" return self . target_normalizer ( data = data ) def _input_normalization ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Normalize the input data using the provided normalizer. Args: data (np.ndarray, optional): The input data to be normalized. (Default value = None) Returns: Normalized input data.: \"\"\" return self . input_normalizer ( data = data ) def _transpose_first_channel ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Transpose the first channel of the variables list. Args: variables_list (list, optional): (Default value = None) \"\"\" batch = np . stack ( variables_list , axis =- 1 ) dims = list ( range ( len ( batch . shape ))) dims_t = [ 0 ] + [ dims [ - 1 ]] + dims [ 1 : - 1 ] batch = batch . transpose ( * dims_t ) return torch . from_numpy ( batch . astype ( \"float32\" )) def _simple_stack ( self , variables_list : list = None ) -> torch . Tensor : \"\"\"Stack the variables list along the last axis. Args: variables_list (list, optional): The list of variables to be stacked. (Default value = None) Returns: A torch tensor of stacked variables.: \"\"\" batch = np . stack ( variables_list , dim =- 1 ) return torch . from_numpy ( batch . astype ( \"float32\" )) def input_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) ) def target_data ( self , indices : np . ndarray = None ) -> torch . 
Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) )","title":"BatchwiseSampler"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.__init__","text":"Batchwise sampler for loading samples from disk and apply normalization if needed. Parameters: Name Type Description Default dataset Group Dataset object containing the samples (Default value = None) None input_variables List [ str ] List of input variables to be loaded (Default value = None) None target_variables List [ str ] List of target variables to be loaded (Default value = None) None input_normalizer callable Function to be applied on the input variables (Default value = None) None target_normalizer callable Function to be applied on the target variables (Default value = None) None channels_first bool Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) None Source code in simulai/batching.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 def __init__ ( self , dataset : h5py . Group = None , input_variables : List [ str ] = None , target_variables : List [ str ] = None , input_normalizer : callable = None , target_normalizer : callable = None , channels_first : bool = None , ) -> None : \"\"\"Batchwise sampler for loading samples from disk and apply normalization if needed. Args: dataset (h5py.Group, optional): Dataset object containing the samples (Default value = None) input_variables (List[str], optional): List of input variables to be loaded (Default value = None) target_variables (List[str], optional): List of target variables to be loaded (Default value = None) input_normalizer (callable, optional): Function to be applied on the input variables (Default value = None) target_normalizer (callable, optional): Function to be applied on the target variables (Default value = None) channels_first (bool, optional): Whether the data should be in channels_first format or not. If not provided, will be set to None. (Default value = None) \"\"\" # This import avoids circular importing from simulai.metrics import MinMaxEvaluation self . dataset = dataset self . input_variables = input_variables self . target_variables = target_variables self . input_normalizer = input_normalizer self . target_normalizer = target_normalizer self . channels_first = channels_first if self . channels_first : self . adjust_dimension = self . _transpose_first_channel else : self . adjust_dimension = self . _simple_stack self . minmax_eval = MinMaxEvaluation () # Defining if normalization will be used or not if self . input_normalizer is not None : self . exec_input_normalization = self . _input_normalization else : self . exec_input_normalization = self . _normalization_bypass if self . target_normalizer is not None : self . exec_target_normalization = self . _target_normalization else : self . exec_target_normalization = self . 
_normalization_bypass","title":"__init__()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.input_data","text":"Retrieve the input data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the input data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of input data: Source code in simulai/batching.py 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def input_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the input data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the input data should be retrieved (Default value = None) Returns: A torch tensor of input data: \"\"\" indices = np . sort ( indices ) variables_arr = [ self . dataset [ i ][ indices ] for i in self . input_variables ] return self . exec_input_normalization ( self . adjust_dimension ( variables_list = variables_arr ) )","title":"input_data()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.input_shape","text":"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: Type Description list A list of integers representing the shape of the input variables.: Source code in simulai/batching.py 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 def input_shape ( self ) -> list : \"\"\"Get the input shape of the dataset. The shape will be adjusted to put the channels dimension first if 'channels_first' is True. Returns: A list of integers representing the shape of the input variables.: \"\"\" if self . channels_first : shape_ = self . dataset [ self . input_variables [ 0 ]] . shape shape = ( shape_ [ 0 ],) + ( len ( self . input_variables ),) + shape_ [ 1 :] else : shape = self . dataset [ self . input_variables [ 0 ]] . shape + ( len ( self . input_variables ), ) return list ( shape )","title":"input_shape()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.minmax","text":"Evaluate the minimum and maximum values of all the target variables in the dataset. Parameters: Name Type Description Default batch_size int Number of samples to use in the evaluation (Default value = None) None data_interval list List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) None Returns: Type Description Tuple [ float , float ] A tuple of minimum and maximum value of the target variables.: Source code in simulai/batching.py 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 def minmax ( self , batch_size : int = None , data_interval : list = None ) -> Tuple [ float , float ]: \"\"\"Evaluate the minimum and maximum values of all the target variables in the dataset. Args: batch_size (int, optional): Number of samples to use in the evaluation (Default value = None) data_interval (list, optional): List of 2 integers representing the starting and ending indexes of the interval in which the values will be evaluated. (Default value = None) Returns: A tuple of minimum and maximum value of the target variables.: \"\"\" min_list = [] max_list = [] for k in self . target_variables : min , max = self . minmax_eval ( dataset = self . 
dataset [ k ], batch_size = batch_size , data_interval = data_interval , ) min_list . append ( min ) max_list . append ( max ) return np . min ( min_list ), np . max ( max_list )","title":"minmax()"},{"location":"simulai_batching/#simulai.batching.BatchwiseSampler.target_data","text":"Retrieve the target data for the given indices, apply normalization and adjust the dimension Parameters: Name Type Description Default indices ndarray The indices of samples for which the target data should be retrieved (Default value = None) None Returns: Type Description Tensor A torch tensor of target data: Source code in simulai/batching.py 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 def target_data ( self , indices : np . ndarray = None ) -> torch . Tensor : \"\"\"Retrieve the target data for the given indices, apply normalization and adjust the dimension Args: indices (np.ndarray, optional): The indices of samples for which the target data should be retrieved (Default value = None) Returns: A torch tensor of target data: \"\"\" indices = np . sort ( indices ) variables_arr = [ torch . from_numpy ( self . dataset [ i ][ indices ] . astype ( \"float32\" )) for i in self . target_variables ] return self . exec_target_normalization ( self . adjust_dimension ( variables_list = variables_arr ) )","title":"target_data()"},{"location":"simulai_batching/#batchdomain_constructor","text":"Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Parameters: Name Type Description Default data_interval list A list of two integers representing the start and end of the data interval. (Default value = None) None batch_size int The desired size of the batches (Default value = None) None batch_indices list A list of indices to be divided into batches. (Default value = None) None Returns: Type Description list A list of lists containing the indices of the input data in the form of batches.: Source code in simulai/batching.py 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 def batchdomain_constructor ( data_interval : list = None , batch_size : int = None , batch_indices : list = None ) -> list : \"\"\"Create a list of indices of the input data in the form of batches, using either an interval or a list of indices. Args: data_interval (list, optional): A list of two integers representing the start and end of the data interval. (Default value = None) batch_size (int, optional): The desired size of the batches (Default value = None) batch_indices (list, optional): A list of indices to be divided into batches. (Default value = None) Returns: A list of lists containing the indices of the input data in the form of batches.: \"\"\" if data_interval is not None : interval_size = data_interval [ 1 ] - data_interval [ 0 ] interval = data_interval elif batch_indices is not None : interval_size = len ( batch_indices ) interval = [ batch_indices [ 0 ], batch_indices [ - 1 ]] else : raise Exception ( \"Either data_interval or batch_indices must be provided.\" ) if data_interval is not None : if interval_size < batch_size : batches_ = [ interval [ 0 ], interval [ 1 ]] batches_ = np . 
array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) residual = interval_size % batch_size batch_size_plus = floor ( residual / n_batches ) batch_size_plus_residual = residual % n_batches batch_size_up = batch_size + batch_size_plus batches_ = ( [ interval [ 0 ]] + [ batch_size_up + 1 ] * batch_size_plus_residual + [ batch_size_up ] * ( n_batches - batch_size_plus_residual ) ) batches_ = np . cumsum ( batches_ ) batches = [ batches_ [ i : i + 2 ] for i in range ( batches_ . shape [ 0 ] - 1 )] else : if interval_size < batch_size : batches_ = batch_indices batches_ = np . array ( batches_ ) else : # divides data_interval in the maximum amount of pieces such that the individual batches >= batch_size # and the batch_sizes differ at maximum by 1 in size n_batches = floor ( interval_size / batch_size ) batches_ = np . array_split ( batch_indices , n_batches , axis = 0 ) batches = [ item . tolist () for item in batches_ ] return batches","title":"batchdomain_constructor"},{"location":"simulai_file/","text":"red { color: red } File IO # load_pkl # Load a pickle file into a Python object. Parameters: Name Type Description Default path str (Default value = None) None Returns: Type Description Union [ object , None] object or None: Raises: Type Description Exception if the provided path is not a file or cannot be opened Source code in simulai/file.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 def load_pkl ( path : str = None ) -> Union [ object , None ]: \"\"\"Load a pickle file into a Python object. Args: path (str, optional): (Default value = None) Returns: object or None: Raises: Exception: if the provided path is not a file or cannot be opened \"\"\" import pickle filename = os . path . basename ( path ) file_extension = filename . split ( \".\" )[ - 1 ] if file_extension == \"pkl\" : if os . path . isfile ( path ): try : with open ( path , \"rb\" ) as fp : model = pickle . load ( fp ) return model except : raise Exception ( f \"The file { path } could not be opened.\" ) else : raise Exception ( f \"The file { path } is not a file.\" ) else : raise Exception ( f \"The file format { file_extension } is not supported. It must be pickle.\" ) SPFile # Source code in simulai/file.py 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 class SPFile : def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . 
compact = compact def _leading_size ( self , first_line : str = None ) -> int : \"\"\"Returns the number of leading white spaces in the given line Args: first_line (str, optional): (Default value = None) Returns: int: number of leading white spaces. \"\"\" leading_whitespaces = len ( first_line ) - len ( first_line . lstrip ()) return leading_whitespaces def _process_code ( self , code : str = None ) -> str : \"\"\"Returns the code string with leading white spaces removed from each line Args: code (str, optional): The code string which to remove the leading whitespaces (Default value = None) Returns: str: The code string with leading white spaces removed. \"\"\" code_lines = code . split ( \" \\n \" ) first_line = code_lines [ 0 ] leading_size = self . _leading_size ( first_line = first_line ) code_lines_ = [ item [ leading_size :] for item in code_lines ] return \" \\n \" . join ( code_lines_ ) def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. (Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device ) def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . 
isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . load ( save_dir = save_dir , name = name , device = device ) return Model __init__ ( compact = False ) # Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Parameters: Name Type Description Default compact bool Compress the directory to a tar file or not. Default : False False Source code in simulai/file.py 65 66 67 68 69 70 71 72 73 74 75 76 def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . compact = compact read ( model_path = None , device = None , template_name = None ) # Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Parameters: Name Type Description Default model_path str Complete path to the model. (Default value = None) None device str Device to load the model onto. (Default value = None) None template_name str (Default value = None) None Returns: Name Type Description NetworkTemplate child of torch.nn.Module The model restored to memory. Source code in simulai/file.py 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . 
isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . load ( save_dir = save_dir , name = name , device = device ) return Model write ( save_dir = None , name = None , template = None , args = None , model = None , device = None ) # Writes the model and its instantiating function to a directory. Parameters: Name Type Description Default save_dir str The absolute directory path to save the model (Default value = None) None name str A name for the model. (Default value = None) None template callable A function for instantiating the model. (Default value = None) None args dict Dictionary containing arguments to be passed to template. (Default value = None) None model NetworkTemplate The model to be saved. (Default value = None) None device str (Default value = None) None Source code in simulai/file.py 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. (Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device )","title":"Simulai file"},{"location":"simulai_file/#file-io","text":"","title":"File IO"},{"location":"simulai_file/#load_pkl","text":"Load a pickle file into a Python object. 
Parameters: Name Type Description Default path str (Default value = None) None Returns: Type Description Union [ object , None] object or None: Raises: Type Description Exception if the provided path is not a file or cannot be opened Source code in simulai/file.py 25 26 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 def load_pkl ( path : str = None ) -> Union [ object , None ]: \"\"\"Load a pickle file into a Python object. Args: path (str, optional): (Default value = None) Returns: object or None: Raises: Exception: if the provided path is not a file or cannot be opened \"\"\" import pickle filename = os . path . basename ( path ) file_extension = filename . split ( \".\" )[ - 1 ] if file_extension == \"pkl\" : if os . path . isfile ( path ): try : with open ( path , \"rb\" ) as fp : model = pickle . load ( fp ) return model except : raise Exception ( f \"The file { path } could not be opened.\" ) else : raise Exception ( f \"The file { path } is not a file.\" ) else : raise Exception ( f \"The file format { file_extension } is not supported. It must be pickle.\" )","title":"load_pkl"},{"location":"simulai_file/#spfile","text":"Source code in simulai/file.py 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 class SPFile : def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . compact = compact def _leading_size ( self , first_line : str = None ) -> int : \"\"\"Returns the number of leading white spaces in the given line Args: first_line (str, optional): (Default value = None) Returns: int: number of leading white spaces. \"\"\" leading_whitespaces = len ( first_line ) - len ( first_line . lstrip ()) return leading_whitespaces def _process_code ( self , code : str = None ) -> str : \"\"\"Returns the code string with leading white spaces removed from each line Args: code (str, optional): The code string which to remove the leading whitespaces (Default value = None) Returns: str: The code string with leading white spaces removed. \"\"\" code_lines = code . split ( \" \\n \" ) first_line = code_lines [ 0 ] leading_size = self . _leading_size ( first_line = first_line ) code_lines_ = [ item [ leading_size :] for item in code_lines ] return \" \\n \" . join ( code_lines_ ) def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. 
(Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device ) def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . load ( save_dir = save_dir , name = name , device = device ) return Model","title":"SPFile"},{"location":"simulai_file/#simulai.file.SPFile.__init__","text":"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Parameters: Name Type Description Default compact bool Compress the directory to a tar file or not. Default : False False Source code in simulai/file.py 65 66 67 68 69 70 71 72 73 74 75 76 def __init__ ( self , compact : bool = False ) -> None : \"\"\"Class for handling persistence of Pytorch Module-like objects. SimulAI Persistency File It saves PyTorch Module-like objects in a directory containing the model template and its coefficients dictionary Args: compact (bool, optional): Compress the directory to a tar file or not. Default : False \"\"\" self . 
compact = compact","title":"__init__()"},{"location":"simulai_file/#simulai.file.SPFile.read","text":"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Parameters: Name Type Description Default model_path str Complete path to the model. (Default value = None) None device str Device to load the model onto. (Default value = None) None template_name str (Default value = None) None Returns: Name Type Description NetworkTemplate child of torch.nn.Module The model restored to memory. Source code in simulai/file.py 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 def read ( self , model_path : str = None , device : str = None , template_name : str = None ) -> NetworkTemplate : \"\"\"Reads a model from the specified file path, imports it as a module, and initializes it as an object of the corresponding class. Args: model_path (str, optional): Complete path to the model. (Default value = None) device (str, optional): Device to load the model onto. (Default value = None) template_name (str, optional): (Default value = None) Returns: NetworkTemplate (child of torch.nn.Module): The model restored to memory. \"\"\" name = os . path . basename ( model_path ) save_dir = model_path sys . path . append ( model_path ) module = importlib . import_module ( name + \"_template\" ) # Restoring template keywords from disk args_filename = os . path . join ( model_path , name + \"_args.json\" ) if os . path . isfile ( args_filename ): args = load_pkl ( path = args_filename ) else : args = None callables = { attr : getattr ( module , attr ) for attr in dir ( module ) if callable ( getattr ( module , attr )) } if len ( callables ) > 1 : if template_name is None : raise Exception ( f \"There are { len ( callables ) } models in the module, please provide a value for name.\" ) else : if args : Model = callables [ template_name ]( ** args ) else : Model = callables [ template_name ]() elif len ( callables ) == 1 : if args : Model = list ( callables . values ())[ 0 ]( ** args ) else : Model = list ( callables . values ())[ 0 ]() else : raise Exception ( \"There is no model template in the module.\" ) Model . load ( save_dir = save_dir , name = name , device = device ) return Model","title":"read()"},{"location":"simulai_file/#simulai.file.SPFile.write","text":"Writes the model and its instantiating function to a directory. Parameters: Name Type Description Default save_dir str The absolute directory path to save the model (Default value = None) None name str A name for the model. (Default value = None) None template callable A function for instantiating the model. (Default value = None) None args dict Dictionary containing arguments to be passed to template. (Default value = None) None model NetworkTemplate The model to be saved. (Default value = None) None device str (Default value = None) None Source code in simulai/file.py 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 def write ( self , save_dir : str = None , name : str = None , template : callable = None , args : dict = None , model : NetworkTemplate = None , device : str = None , ) -> None : \"\"\"Writes the model and its instantiating function to a directory. 
Args: save_dir (str, optional): The absolute directory path to save the model (Default value = None) name (str, optional): A name for the model. (Default value = None) template (callable, optional): A function for instantiating the model. (Default value = None) args (dict, optional): Dictionary containing arguments to be passed to template. (Default value = None) model (NetworkTemplate, optional): The model to be saved. (Default value = None) device (str, optional): (Default value = None) \"\"\" model_dir = os . path . join ( save_dir , name ) # Saving the template code if not os . path . isdir ( model_dir ): os . mkdir ( model_dir ) template_filename = os . path . join ( model_dir , name + \"_template.py\" ) tfp = open ( template_filename , \"w\" ) code = inspect . getsource ( template ) code = self . _process_code ( code = code ) tfp . write ( code ) args_filename = os . path . join ( model_dir , name + \"_args.pkl\" ) afp = open ( args_filename , \"wb\" ) pickle . dump ( args , afp ) # Saving the model coefficients model . save ( save_dir = model_dir , name = name , device = device )","title":"write()"},{"location":"simulai_io/","text":"red { color: red } simulai.io # ByPassPreparer # Bases: DataPreparer ByPass class, it fills the DataPreparer blank, but does nothing. Source code in simulai/io.py 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 class ByPassPreparer ( DataPreparer ): \"\"\"ByPass class, it fills the DataPreparer blank, but does nothing.\"\"\" name = \"no_preparer\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . collapsible_shapes = None self . dtype = None def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data def prepare_output_structured_data ( self , data : np . ndarray ) -> np . 
recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', '>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) Source code in simulai/io.py 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data prepare_input_structured_data ( data ) # Prepare structured input data by converting it to an ndarray. Parameters: Name Type Description Default data recarray required Returns: Type Description ndarray np.ndarray: numpy ndarray version of the input data. Note This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) Source code in simulai/io.py 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data prepare_output_data ( data ) # Prepare output data. Parameters: Name Type Description Default data ndarray required Returns: Type Description ndarray numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) Source code in simulai/io.py 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. 
Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data prepare_output_structured_data ( data ) # Prepare structured output data by converting it to a recarray. Parameters: Name Type Description Default data ndarray required Returns: Type Description recarray np.recarray: numpy recarray version of the output data. Note This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' np . recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' (n0, prod(n1, ..., nm)) Source code in simulai/io.py 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class Reshaper ( DataPreparer ): \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm))\"\"\" name = \"reshaper\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . collapsible_shapes = None self . collapsed_shape = None self . dtype = None self . n_features = None def _set_shapes_from_data ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): The input data to reshape. (Default value = None) Example: >>> reshaper = Reshaper() >>> reshaper._set_shapes_from_data(np.random.random((10,3,4,5))) >>> reshaper.collapsible_shapes (3, 4, 5) \"\"\" self . collapsible_shapes = data . shape [ 1 :] self . collapsed_shape = np . prod ( self . collapsible_shapes ) . astype ( int ) self . _is_recarray = data . dtype . names is not None if self . _is_recarray : self . n_features = len ( data . dtype . names ) * self . collapsed_shape else : self . n_features = self . collapsed_shape def _prepare_input_data ( self , data : np . ndarray = None ) -> np . 
ndarray : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function reshapes the input data to (n0, prod(n1, ..., nm)) shape. Example: >>> reshaper = Reshaper() >>> data = np.random.random((10,3,4,5)) >>> reshaper.prepare_input_data(data) array([[0.527, 0.936, ... , 0.812], [0.947, 0.865, ... , 0.947], ..., [0.865, 0.947, ... , 0.865], [0.947, 0.865, ... , 0.947]]) \"\"\" assert len ( data . shape ) > 1 , \"Error! data must have at least two dimensions\" return data . reshape (( data . shape [ 0 ], self . n_features )) def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data ) def _reshape_to_output ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Reshape the data to its original shape before reshaping. Args: data (np.ndarray): Returns: np.ndarray: Note: The original shape of the data is stored in `collapsible_shapes` attribute. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper._set_shapes_from_data(input_data) >>> reshaped_data = reshaper._reshape_to_output(input_data.flatten()) >>> reshaped_data.shape (2, 3, 4) \"\"\" return data . reshape (( data . shape [ 0 ],) + self . collapsible_shapes ) def _prepare_output_data ( self , data : np . ndarray = None , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray, optional): The input data to be prepared, by default None single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" if self . _is_recarray : return self . _prepare_output_structured_data ( data ) else : return self . _reshape_to_output ( data ) def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . _prepare_output_data ( data ) def _prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" self . dtype = data . dtype self . _set_shapes_from_data ( data ) data_ = recfunctions . structured_to_unstructured ( data ) reshaped_data_ = self . _prepare_input_data ( data_ ) return reshaped_data_ def prepare_input_structured_data ( self , data : np . 
recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data ) def _prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" data = data . reshape ( ( data . shape [ 0 ],) + self . collapsible_shapes + ( len ( self . dtype ),) ) output_data = recfunctions . unstructured_to_structured ( data , self . dtype ) output_data = self . _reshape_to_output ( output_data ) return output_data prepare_input_data ( data ) # Prepare input data for reshaping. Parameters: Name Type Description Default data Union [ ndarray , recarray ] required Returns: Type Description ndarray np.ndarray: Note If data is a structured numpy array, it will be passed to _prepare_input_structured_data function. If data is a plain numpy array, it will be passed to _prepare_input_data function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) Source code in simulai/io.py 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data ) prepare_input_structured_data ( data = None ) # Prepare the input structured data to be in the shape and format expected by the model. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: The prepared input structured data Source code in simulai/io.py 297 298 299 300 301 302 303 304 305 306 307 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . 
ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data ) prepare_output_data ( data , single = False ) # Prepare the input data to be in the shape and format expected by the model. Parameters: Name Type Description Default data ndarray The input data to be prepared required single bool (Default value = False) False Returns: Type Description ndarray np.ndarray: The prepared input data Source code in simulai/io.py 268 269 270 271 272 273 274 275 276 277 278 279 def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . _prepare_output_data ( data ) prepare_output_structured_data ( data = None ) # Prepare the output data to be in the shape and format expected by the user. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description recarray np.recarray: The prepared output structured data Source code in simulai/io.py 309 310 311 312 313 314 315 316 317 318 319 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data ) ScalerReshaper # Bases: Reshaper ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data. Source code in simulai/io.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 class ScalerReshaper ( Reshaper ): \"\"\"ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data.\"\"\" name = \"scalerreshaper\" def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . 
ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data ) def _get_structured_bias_scale ( self , dtype : np . dtype = None ) -> Tuple [ dict , dict ]: \"\"\"Get the bias and scale values for each field of a structured array. Args: dtype (np.dtype, optional): (Default value = None) Returns: Tuple[dict, dict]: A tuple of two dictionaries, the first containing the bias values for each field and the second Note: If the bias and scale attributes are floats, they will be used for all fields. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper._get_structured_bias_scale(np.dtype([('a', float), ('b', float)])) ({'a': 10, 'b': 10}, {'a': 2, 'b': 2}) \"\"\" bias = self . bias if isinstance ( self . bias , float ): bias = { n : self . bias for n in dtype . names } scale = self . scale if isinstance ( self . scale , float ): scale = { n : self . scale for n in dtype . names } return bias , scale def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . dtype . 
names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_structured_data ( self , data : np . ndarray = None , * args , ** kwargs ) -> np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' (n0, prod(n1, ..., nm)) Parameters: Name Type Description Default bias float (Default value = 0.0) 0.0 scale float (Default value = 1.0) 1.0 channels_last bool (Default value = False) False Source code in simulai/io.py 345 346 347 348 349 350 351 352 353 354 355 356 357 358 def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale prepare_input_data ( data = None , * args , ** kwargs ) # Prepare the input data by subtracting the bias and scaling the data. Parameters: Name Type Description Default data Union [ ndarray , recarray ] The input data to be prepared (Default value = None) None *args () **kwargs {} Returns: Type Description ndarray np.ndarray: The prepared input data Note If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) Source code in simulai/io.py 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . 
prepare_input_structured_data ( data , * args , ** kwargs ) prepare_input_structured_data ( data = None , * args , ** kwargs ) # Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data recarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () Returns: Type Description ndarray np.ndarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) Source code in simulai/io.py 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . dtype . names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs ) prepare_output_data ( data = None , * args , ** kwargs ) # Prepare the output data by scaling it and adding the bias. Parameters: Name Type Description Default data Union [ ndarray , recarray ] The output data to be prepared (Default value = None) None *args () Returns: Type Description ndarray np.ndarray: The prepared output data Note If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) Source code in simulai/io.py 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. 
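Taken together, the input and output paths of ScalerReshaper amount to an affine transform ((data - bias) / scale on the way in, data * scale + bias on the way out) combined with the flattening inherited from Reshaper. A minimal round-trip sketch, assuming the class is importable from simulai.io as the "Source code in simulai/io.py" references suggest; the array and constants below are illustrative:

import numpy as np
from simulai.io import ScalerReshaper  # assumed import path (source: simulai/io.py)

# A batch of 4 samples, each of shape (2, 5)
data = np.random.rand(4, 2, 5)

reshaper = ScalerReshaper(bias=10.0, scale=2.0)

# Subtracts the bias, divides by the scale and flattens each sample: (4, 2, 5) -> (4, 10)
flat = reshaper.prepare_input_data(data)
print(flat.shape)  # (4, 10)

# Applies the inverse affine map and restores the collapsed dimensions: (4, 10) -> (4, 2, 5)
restored = reshaper.prepare_output_data(flat)
print(np.allclose(restored, data))  # True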
Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data ) prepare_output_structured_data ( data = None , * args , ** kwargs ) # Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data ndarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () **kwargs {} Returns: Type Description recarray np.recarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. This class is useful for datasets in which there are invalid data. Source code in simulai/io.py 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 class MapValid ( Reshaper ): \"\"\"MapValid is a reshaper class that converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. 
This class is useful for datasets in which there are invalid data. \"\"\" name = \"map_valid\" def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . channels_last = channels_last def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... ] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ] def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . 
default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data ) __init__ ( config = None , mask = None , channels_last = True ) # Initialize the MapValid class with the configurations and mask passed as parameters. Parameters: Name Type Description Default config dict configurations dictionary, by default None None mask ( int , NaN , inf , optional ) mask to select the invalid values, by default None None channels_last bool if set to True, move the channel dimension to the last, by default True True Source code in simulai/io.py 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . 
channels_last = channels_last prepare_input_data ( data = None ) # Internal input data preparer, executed for each label of the structured array Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) Source code in simulai/io.py 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... ] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ] prepare_input_structured_data ( data = None ) # This function is used to prepare structured input data for further processing. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. 
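Putting the pieces above together, MapValid keeps only the positions that are valid in the first sample and can later re-expand the compressed array, refilling the invalid positions with the original mask. A small round-trip sketch, again assuming the simulai.io import path; the empty config dict and the NaN mask are illustrative choices, not required values:

import numpy as np
from simulai.io import MapValid  # assumed import path (source: simulai/io.py)

# Three snapshots of a 2x3 field; one position is invalid (NaN) in every snapshot
field = np.random.rand(3, 2, 3)
field[:, 0, 1] = np.nan

preparer = MapValid(config={}, mask=np.nan)

# Only the 5 valid positions of each snapshot are kept: (3, 2, 3) -> (3, 5)
valid = preparer.prepare_input_data(field)
print(valid.shape)  # (3, 5)

# The original layout is restored and invalid positions are refilled with the mask
restored = preparer.prepare_output_data(valid)
print(restored.shape)  # (3, 2, 3)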
Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data ) prepare_output_data ( data = None ) # Prepare output data for the MapValid operation. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], Source code in simulai/io.py 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data prepare_output_structured_data ( data = None ) # This function is used to prepare structured output data for further processing. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. 
Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data ) Sampling # Bases: DataPreparer This class is used for sampling data from the input dataset. Source code in simulai/io.py 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 class Sampling ( DataPreparer ): \"\"\"This class is used for sampling data from the input dataset.\"\"\" name = \"sampling\" def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None @property def indices ( self ) -> list : \"\"\"Returns the indices of the data that have been sampled. Returns: list: The indices of the data that have been sampled. Raises: AssertionError: If the indices have not been generated yet. Note: The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1] \"\"\" assert self . sampled_indices is not None , ( \"The indices still were not generate.\" \"Run prepare_input_data or prepare_input_structured_data for getting them.\" ) return sorted ( self . sampled_indices . tolist ()) def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . 
global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ] def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. - When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data indices : list property # Returns the indices of the data that have been sampled. Returns: Name Type Description list list The indices of the data that have been sampled. Raises: Type Description AssertionError If the indices have not been generated yet. Note The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1] __init__ ( choices_fraction = 0.1 , shuffling = False ) # Initializes the Sampling class. 
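A brief in-memory usage sketch of Sampling and its indices property, assuming the simulai.io import path; the array shape and fraction below are illustrative:

import numpy as np
from simulai.io import Sampling  # assumed import path (source: simulai/io.py)

data = np.random.rand(100, 3)

sampler = Sampling(choices_fraction=0.1, shuffling=True)

# Draws 10% of the rows; the selection uses np.random.choice, so repeated rows are possible
subset = sampler.prepare_input_data(data)
print(subset.shape)  # (10, 3)

# The indices property reports which row indices were drawn, in sorted order
print(sampler.indices)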
Parameters: Name Type Description Default choices_fraction float The fraction of the dataset to be sampled, by default 0.1 0.1 shuffling bool Whether to shuffle the data before sampling, by default False False Source code in simulai/io.py 711 712 713 714 715 716 717 718 719 720 721 722 723 724 def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None prepare_input_data ( data = None , data_interval = None ) # Prepare input data for sampling. Parameters: Name Type Description Default data ndarray The input data. Default is None. None data_interval list The interval of data that should be selected. Default is None, None Returns: Type Description ndarray numpy.ndarray: The sampled data. Note: The data_interval parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) Source code in simulai/io.py 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ] prepare_input_structured_data ( data = None , data_interval = None , batch_size = None , dump_path = None ) # Prepares structured data for further processing. Parameters: Name Type Description Default data Dataset Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) None data_interval list The interval of the data to be prepared, the default shape is [0, data.shape[0]] None batch_size int The size of the batches to be processed, defaults to None None dump_path str (Default value = None) None Returns: Type Description recarray np.recarray: Note The features dimensions of the input data should be 1 in NumPy structured arrays. 
When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) Source code in simulai/io.py 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. - When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data MovingWindow # MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. 
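In addition to the graphical example that follows, a short shape-oriented sketch of the windowing performed by __call__, assuming the simulai.io import path; the toy series is illustrative:

import numpy as np
from simulai.io import MovingWindow  # assumed import path (source: simulai/io.py)

# A toy time-series with 10 timesteps and 2 variables
series = np.arange(20).reshape(10, 2)

window = MovingWindow(history_size=3, horizon_size=2, skip_size=1)
x, y = window(input_data=series, output_data=series)

print(x.shape)  # (6, 3, 2): six windows of three history steps each
print(y.shape)  # (6, 2, 2): the two-step horizon following each window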
See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) Source code in simulai/io.py 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 class MovingWindow : r \"\"\"MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \" def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . ndarray ( time_series ) def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch def get_last_item ( self , batch : np . ndarray ) -> np . 
ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :] def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data __call__ ( input_data = None , output_data = None ) # Apply Moving Window over the input data Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note It is expected that the input_data and output_data have the same shape This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) Source code in simulai/io.py 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . 
ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data __init__ ( history_size = None , skip_size = 1 , horizon_size = None , full_output = True ) # Initializes the MovingWindow class Parameters: Name Type Description Default history_size int the size of the history window, by default None None skip_size int the number of steps to skip between windows, by default 1 1 horizon_size int the size of the horizon window, by default None None full_output bool flag to use the full output or only the last item, by default True True Source code in simulai/io.py 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \" bypass ( batch ) # Does nothing, returns the input batch. 
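The full_output flag decides whether bypass or get_last_item post-processes each horizon batch; with full_output=False only the final step of every horizon is kept. A sketch under the same assumptions as the previous example:

import numpy as np
from simulai.io import MovingWindow  # assumed import path (source: simulai/io.py)

series = np.arange(20).reshape(10, 2)

# full_output=False routes each horizon batch through get_last_item instead of bypass
window = MovingWindow(history_size=3, horizon_size=2, skip_size=1, full_output=False)
x, y = window(input_data=series, output_data=series)

print(x.shape)  # (6, 3, 2)
print(y.shape)  # (6, 1, 2): a single target step per window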
Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: the input array Source code in simulai/io.py 955 956 957 958 959 960 961 962 963 964 965 def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch get_last_item ( batch ) # Get the last item of a batch Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: Note This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) Source code in simulai/io.py 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 def get_last_item ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :] transform ( time_series ) # Applies the moving window over the time_series array. Parameters: Name Type Description Default time_series ndarray required Returns: Type Description ndarray np.ndarray: the transformed array with the windows. Source code in simulai/io.py 943 944 945 946 947 948 949 950 951 952 953 def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . ndarray ( time_series ) SlidingWindow # SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: Name Type Description history_size int The number of history samples to include in each window. skip_size int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. 
See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] Source code in simulai/io.py 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 class SlidingWindow : r \"\"\"SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: history_size : int The number of history samples to include in each window. skip_size : int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. (Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \" def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . 
ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data __call__ ( input_data = None , output_data = None ) # Applies a sliding window operation on the given time series and returns the windowed samples. Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note history_size and horizon_size should be positive integers history_size should be less than the length of input_data input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) Source code in simulai/io.py 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. 
Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data __init__ ( history_size = None , skip_size = None ) # Initialize the SlidingWindow object. Parameters: Name Type Description Default history_size int The number of history samples to include in each window. (Default value = None) None skip_size int The number of samples to skip between each window. (Default value = None) None Source code in simulai/io.py 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. (Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \" apply ( time_series ) # Applies the sliding window to the given time series. Parameters: Name Type Description Default time_series List [ int ] required Returns: Type Description List [ List [ int ]] List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] Source code in simulai/io.py 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. 
Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples IntersectingBatches # IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes. Source code in simulai/io.py 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 class IntersectingBatches : r \"\"\"IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes.\"\"\" def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. (Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . full = full def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m ) def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size <= data_size : input_batch = input_data [ center : center + self . 
batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ]) __call__ ( input_data = None ) # Applies the batching strategy to the input data. Parameters: Name Type Description Default input_data ndarray (Default value = None) None Returns: Type Description Union [ list , ndarray ] Union[list, np.ndarray]: A list of batches or a single batch if full attribute is set to False. Note: - If the full attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] Source code in simulai/io.py 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size <= data_size : input_batch = input_data [ center : center + self . batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ]) __init__ ( skip_size = 1 , batch_size = None , full = True ) # Initializes the IntersectingBatches class Parameters: Name Type Description Default skip_size int Number of samples to skip between two windows. (Default value = 1) 1 batch_size int Number of samples to use in each batch. (Default value = None) None full bool Whether to include the last batch or not, even if it's not full. (Default value = True) True Source code in simulai/io.py 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. (Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . 
full = full get_indices ( dim = None ) # It gets just the indices of the shifting Parameters: Name Type Description Default dim int total dimension (Default value = None) None Returns: Type Description ndarray np.ndarray: the shifted indices Source code in simulai/io.py 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m ) BatchwiseExtrapolation # BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. Attributes: Name Type Description time_id int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape Source code in simulai/io.py 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 class BatchwiseExtrapolation : r \"\"\"BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. Attributes: time_id : int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape \"\"\" def __init__ ( self , op : callable = None , auxiliary_data : np . ndarray = None ) -> None : self . op = op self . auxiliary_data = auxiliary_data self . time_id = 0 def _simple_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . 
ndarray : r \"\"\"Given the current extrapolation dataset, use the last history_size number of rows to create the next state of the dataset. Args: extrapolation_dataset (np.ndarray): The current state of the extrapolation dataset. history_size (int, optional): (Default value = 0) Returns: np.ndarray: The next state of the extrapolation dataset. \"\"\" return extrapolation_dataset [ None , - history_size :, :] def _forcing_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . ndarray : return np . hstack ( [ extrapolation_dataset [ - history_size :, :], self . auxiliary_data [ self . time_id - history_size : self . time_id , :], ] )[ None , :, :] def __call__ ( self , init_state : np . ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. (Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset __call__ ( init_state = None , history_size = None , horizon_size = None , testing_data_size = None ) # A function that performs the extrapolation of the time series. Parameters: Name Type Description Default init_state ndarray initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) None history_size int the size of the history window used in the extrapolation. 
(Default value = None) None horizon_size int the size of the horizon window used in the extrapolation. (Default value = None) None testing_data_size int (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) Source code in simulai/io.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 def __call__ ( self , init_state : np . ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. (Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset BatchCopy # A class for copying data in batches and applying a transformation function. 
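A minimal usage sketch of the batched copy described above (illustrative only: the file names, dataset key, interval and batch size are assumptions, and the identity transformation merely stands in for any per-batch transform): >>> import h5py >>> from simulai.io import BatchCopy >>> src = h5py.File("raw_data.h5", "r") # hypothetical source file containing a structured dataset "data" >>> bc = BatchCopy() >>> dset = bc.copy(data=src["data"], data_interval=[0, 10000], batch_size=1000, dump_path="data_copy.h5", transformation=lambda x: x) # identity transform; replace with any callable applied batch by batch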
Source code in simulai/io.py 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 class BatchCopy : r \"\"\"A class for copying data in batches and applying a transformation function.\"\"\" def __init__ ( self , channels_last : bool = False ) -> None : self . channels_last = channels_last def _single_copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy data from a single h5py.Dataset to another h5py.Dataset in batches. Args: data (h5py.Dataset, optional): (Default value = None) data_interval (list, optional): The interval of the data to be copied. (Default value = None) batch_size (int, optional): The size of the batch to be copied. (Default value = None) dump_path (str, optional): The path where the new h5py.Dataset will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The new h5py.Dataset after the copy process. Note: - Copy data from data_file.h5/data to data_copy.h5/data with a batch size of 1000: - The input must be an h5py.Dataset. Example: >>> data = h5py.File(\"data_file.h5\", \"r\") >>> batch_copy = BatchCopy() >>> dset = batch_copy._single_copy(data=data[\"data\"], data_interval=[0, 100000], batch_size=1000, dump_path=\"data_copy.h5\") \"\"\" assert isinstance ( data , h5py . Dataset ), \"The input must be h5py.Dataset\" variables_list = data . dtype . names data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def _multiple_copy ( self , data : list = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy and concatenate multiple h5py.Dataset objects into a single h5py.Dataset object. Args: data (list, optional): A list of h5py.Dataset objects to be concatenated. (Default value = None) data_interval (list, optional): A list of two integers indicating the start and end index of the data to be concatenated. (Default value = None) batch_size (int, optional): The number of samples to be processed at a time. 
(Default value = None) dump_path (str, optional): The file path where the concatenated h5py.Dataset object will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The concatenated h5py.Dataset object. \"\"\" assert all ( [ isinstance ( di , h5py . Dataset ) for di in data ] ), \"All inputs must be h5py.Dataset\" variables_list = sum ([ list ( di . dtype . names ) for di in data ], []) data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data [ 0 ] . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) copy ( data = None , data_interval = None , batch_size = None , dump_path = None , transformation = lambda : data ) # Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Parameters: Name Type Description Default data Dataset input data to be copied (Default value = None) None data_interval list the range of the data to be copied (Default value = None) None batch_size int the size of the batches to be used to copy the data (Default value = None) None dump_path str the path of the file where the data will be copied (Default value = None) None transformation callable (Default value = lambda data: data) lambda : data Returns: Type Description Dataset h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the _multiple_copy function. 
Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) Source code in simulai/io.py 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) MakeTensor # This class is used to make torch tensors from numpy arrays or dictionaries. Parameters: Name Type Description Default input_names List [ str ] list of input names. None output_names List [ str ] list of output names. None Note input_tensors will be a list of tensors in case of numpy array and dictionary inputs. The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. The input_data will be converted to float32 dtype. The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. If input_data is None, it will raise an exception. 
Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) Source code in simulai/io.py 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 class MakeTensor : r \"\"\"This class is used to make torch tensors from numpy arrays or dictionaries. Args: input_names (List[str]): list of input names. output_names (List[str]): list of output names. Note: - input_tensors will be a list of tensors in case of numpy array and dictionary inputs. - The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. - The input_data will be converted to float32 dtype. - The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. - If input_data is None, it will raise an exception. Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) \"\"\" def __init__ ( self , input_names = None , output_names = None ): self . input_names = input_names self . output_names = output_names def _make_tensor ( self , input_data : np . ndarray = None , device : str = \"cpu\" ) -> List [ torch . Tensor ]: r \"\"\"Convert input_data to a list of torch tensors. Args: input_data (np.ndarray, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: List[torch.Tensor]: list of tensors. \"\"\" inputs_list = list ( torch . split ( input_data , 1 , dim =- 1 )) for vv , var in enumerate ( inputs_list ): var . requires_grad = True var = var . to ( device ) inputs_list [ vv ] = var # var = var[..., None] return inputs_list def _make_tensor_dict ( self , input_data : dict = None , device : str = \"cpu\" ) -> dict : r \"\"\"Convert input_data to a dictionary of torch tensors. Args: input_data (dict, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: dict: dictionary of tensors. \"\"\" inputs_dict = dict () for key , item in input_data . items (): item . requires_grad = True item = item . to ( device ) inputs_dict [ key ] = item return inputs_dict def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . 
Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" ) __call__ ( input_data = None , device = 'cpu' ) # Make tensors from input_data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor , Dict [ str , ndarray ]] input data to be converted. (Default value = None) None device str (Default value = \"cpu\") 'cpu' Returns: Type Description List [ Tensor ] Union[List[torch.Tensor], dict]: Raises: Type Description - Exception Source code in simulai/io.py 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" ) GaussianNoise # Bases: Dataset GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) Source code in simulai/io.py 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 class GaussianNoise ( Dataset ): r \"\"\"GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) \"\"\" def __init__ ( self , stddev : float = 0.01 , input_data : Union [ np . ndarray , Tensor ] = None ): super ( Dataset , self ) . __init__ () self . 
stddev = stddev if isinstance ( input_data , np . ndarray ): input_data_ = torch . from_numpy ( input_data . astype ( \"float32\" )) else : input_data_ = input_data self . input_data = input_data_ self . data_shape = tuple ( self . input_data . shape ) def size ( self ): return self . data_shape def __call__ ( self ): return ( 1 + self . stddev * torch . randn ( * self . data_shape )) * self . input_data Tokenizer # Wrapper for multiple tokenization approaches Source code in simulai/io.py 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 class Tokenizer : \"\"\"Wrapper for multiple tokenization approaches\"\"\" def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" ) def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs ) def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs ) def _make_time_input_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None , step : float = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) step (float): Size of the timestep. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized input dataset. \"\"\" dim = num_step src = np . repeat ( np . expand_dims ( src , axis = 1 ), dim , axis = 1 ) src_shape = src . shape src_shape_list = list ( src_shape ) src_shape_list [ - 1 ] += 1 src_final = np . zeros ( tuple ( src_shape_list )) src_final [:, :, : - 1 ] = src for i in range ( num_step ): src_final [:, i , - 1 ] += step * i return src_final [: - num_step + 1 ] def _make_time_target_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized target dataset. \"\"\" moving_window = MovingWindow ( history_size = 1 , skip_size = 1 , horizon_size = num_step - 1 ) input_data , output_data = moving_window ( input_data = src , output_data = src ) return np . 
concatenate ([ input_data , output_data ], axis = 1 ) __init__ ( kind = 'time_indexer' ) # Parameters: Name Type Description Default kind str The kind of tokenization to be used. (Default value = \"time_indexer\") 'time_indexer' Source code in simulai/io.py 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" ) generate_input_tokens ( input_data , ** kwargs ) # Generating the input sequence of tokens. Source code in simulai/io.py 1810 1811 1812 1813 1814 def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs ) generate_target_tokens ( target_data , ** kwargs ) # Generating the target sequence of tokens. Source code in simulai/io.py 1816 1817 1818 1819 1820 def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs )","title":"Simulai io"},{"location":"simulai_io/#simulaiio","text":"","title":"simulai.io"},{"location":"simulai_io/#bypasspreparer","text":"Bases: DataPreparer ByPass class, it fills the DataPreparer blank, but does nothing. Source code in simulai/io.py 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 class ByPassPreparer ( DataPreparer ): \"\"\"ByPass class, it fills the DataPreparer blank, but does nothing.\"\"\" name = \"no_preparer\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . collapsible_shapes = None self . dtype = None def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. 
Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data def prepare_output_structured_data ( self , data : np . ndarray ) -> np . recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', '>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) Source code in simulai/io.py 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 def prepare_input_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare input data. Args: data (np.ndarray): Returns: numpy.ndarray: Example: >>> import numpy as np >>> data = np.random.rand(5, 3, 4, 2) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_input_data(data) >>> prepared_data.shape (5, 3, 4, 2) \"\"\" self . collapsible_shapes = data . shape [ 1 :] return data","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.ByPassPreparer.prepare_input_structured_data","text":"Prepare structured input data by converting it to an ndarray. Parameters: Name Type Description Default data recarray required Returns: Type Description ndarray np.ndarray: numpy ndarray version of the input data. Note This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) Source code in simulai/io.py 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 def prepare_input_structured_data ( self , data : np . recarray ) -> np . ndarray : \"\"\"Prepare structured input data by converting it to an ndarray. Args: data (np.recarray): Returns: np.ndarray: numpy ndarray version of the input data. Note: This function is used when the input data is in the form of a structured array and needs to be converted to a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('a', int), ('b', '|S1'), ('c', float)]) >>> preparer = ByPassPreparer() >>> preparer.prepare_input_structured_data(data) array([[1, 'a', 0.5], [2, 'b', 0.6]]) \"\"\" return data","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.ByPassPreparer.prepare_output_data","text":"Prepare output data. 
Parameters: Name Type Description Default data ndarray required Returns: Type Description ndarray numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) Source code in simulai/io.py 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 def prepare_output_data ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Prepare output data. Args: data (np.ndarray): Returns: numpy.ndarray: The output data in the original format Example: >>> import numpy as np >>> data = np.random.rand(5, 3) >>> preparer = ByPassPreparer() >>> prepared_data = preparer.prepare_output_data(data) >>> prepared_data.shape (5, 3) \"\"\" return data","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.ByPassPreparer.prepare_output_structured_data","text":"Prepare structured output data by converting it to a recarray. Parameters: Name Type Description Default data ndarray required Returns: Type Description recarray np.recarray: numpy recarray version of the output data. Note This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' np . recarray : \"\"\"Prepare structured output data by converting it to a recarray. Args: data (np.ndarray): Returns: np.recarray: numpy recarray version of the output data. Note: This function is used when the output data needs to be in the form of a structured array and is currently in the form of a regular numpy ndarray. Example: >>> import numpy as np >>> data = np.array([[1, 'a', 0.5], [2, 'b', 0.6]]) >>> preparer = ByPassPreparer() >>> preparer.prepare_output_structured_data(data) rec.array([(1, 'a', 0.5), (2, 'b', 0.6)], dtype=[('f0', ' (n0, prod(n1, ..., nm)) Source code in simulai/io.py 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class Reshaper ( DataPreparer ): \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm))\"\"\" name = \"reshaper\" def __init__ ( self , channels_last : bool = False ) -> None : super () . __init__ () self . channels_last = channels_last self . collapsible_shapes = None self . collapsed_shape = None self . dtype = None self . n_features = None def _set_shapes_from_data ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): The input data to reshape. 
(Default value = None) Example: >>> reshaper = Reshaper() >>> reshaper._set_shapes_from_data(np.random.random((10,3,4,5))) >>> reshaper.collapsible_shapes (3, 4, 5) \"\"\" self . collapsible_shapes = data . shape [ 1 :] self . collapsed_shape = np . prod ( self . collapsible_shapes ) . astype ( int ) self . _is_recarray = data . dtype . names is not None if self . _is_recarray : self . n_features = len ( data . dtype . names ) * self . collapsed_shape else : self . n_features = self . collapsed_shape def _prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function reshapes the input data to (n0, prod(n1, ..., nm)) shape. Example: >>> reshaper = Reshaper() >>> data = np.random.random((10,3,4,5)) >>> reshaper.prepare_input_data(data) array([[0.527, 0.936, ... , 0.812], [0.947, 0.865, ... , 0.947], ..., [0.865, 0.947, ... , 0.865], [0.947, 0.865, ... , 0.947]]) \"\"\" assert len ( data . shape ) > 1 , \"Error! data must have at least two dimensions\" return data . reshape (( data . shape [ 0 ], self . n_features )) def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data ) def _reshape_to_output ( self , data : np . ndarray ) -> np . ndarray : \"\"\"Reshape the data to its original shape before reshaping. Args: data (np.ndarray): Returns: np.ndarray: Note: The original shape of the data is stored in `collapsible_shapes` attribute. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper._set_shapes_from_data(input_data) >>> reshaped_data = reshaper._reshape_to_output(input_data.flatten()) >>> reshaped_data.shape (2, 3, 4) \"\"\" return data . reshape (( data . shape [ 0 ],) + self . collapsible_shapes ) def _prepare_output_data ( self , data : np . ndarray = None , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray, optional): The input data to be prepared, by default None single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" if self . _is_recarray : return self . _prepare_output_structured_data ( data ) else : return self . _reshape_to_output ( data ) def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . 
_prepare_output_data ( data ) def _prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" self . dtype = data . dtype self . _set_shapes_from_data ( data ) data_ = recfunctions . structured_to_unstructured ( data ) reshaped_data_ = self . _prepare_input_data ( data_ ) return reshaped_data_ def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data ) def _prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" data = data . reshape ( ( data . shape [ 0 ],) + self . collapsible_shapes + ( len ( self . dtype ),) ) output_data = recfunctions . unstructured_to_structured ( data , self . dtype ) output_data = self . _reshape_to_output ( output_data ) return output_data","title":"Reshaper"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_input_data","text":"Prepare input data for reshaping. Parameters: Name Type Description Default data Union [ ndarray , recarray ] required Returns: Type Description ndarray np.ndarray: Note If data is a structured numpy array, it will be passed to _prepare_input_structured_data function. If data is a plain numpy array, it will be passed to _prepare_input_data function. Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) Source code in simulai/io.py 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ]) -> np . ndarray : \"\"\"Prepare input data for reshaping. Args: data (Union[np.ndarray, np.recarray]): Returns: np.ndarray: Note: - If `data` is a structured numpy array, it will be passed to `_prepare_input_structured_data` function. - If `data` is a plain numpy array, it will be passed to `_prepare_input_data` function. 
Example: >>> reshaper = Reshaper() >>> input_data = np.random.rand(2, 3, 4) >>> reshaper.prepare_input_data(input_data) array([[ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.948..., 0.276..., 0.967..., 0.564...], [ 0.276..., 0.948..., 0.564..., 0.967...], [ 0.276..., 0.948..., 0.564..., 0.967...]]) \"\"\" self . _set_shapes_from_data ( data ) if self . _is_recarray : return self . _prepare_input_structured_data ( data ) else : return self . _prepare_input_data ( data )","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_input_structured_data","text":"Prepare the input structured data to be in the shape and format expected by the model. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: The prepared input structured data Source code in simulai/io.py 297 298 299 300 301 302 303 304 305 306 307 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"Prepare the input structured data to be in the shape and format expected by the model. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: The prepared input structured data \"\"\" return self . _prepare_input_structured_data ( data )","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_output_data","text":"Prepare the input data to be in the shape and format expected by the model. Parameters: Name Type Description Default data ndarray The input data to be prepared required single bool (Default value = False) False Returns: Type Description ndarray np.ndarray: The prepared input data Source code in simulai/io.py 268 269 270 271 272 273 274 275 276 277 278 279 def prepare_output_data ( self , data : np . ndarray , single : bool = False ) -> np . ndarray : \"\"\"Prepare the input data to be in the shape and format expected by the model. Args: data (np.ndarray): The input data to be prepared single (bool, optional): (Default value = False) Returns: np.ndarray: The prepared input data \"\"\" return self . _prepare_output_data ( data )","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.Reshaper.prepare_output_structured_data","text":"Prepare the output data to be in the shape and format expected by the user. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description recarray np.recarray: The prepared output structured data Source code in simulai/io.py 309 310 311 312 313 314 315 316 317 318 319 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . recarray : \"\"\"Prepare the output data to be in the shape and format expected by the user. Args: data (np.ndarray, optional): (Default value = None) Returns: np.recarray: The prepared output structured data \"\"\" return self . _prepare_output_structured_data ( data )","title":"prepare_output_structured_data()"},{"location":"simulai_io/#scalerreshaper","text":"Bases: Reshaper ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data. 
Source code in simulai/io.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 class ScalerReshaper ( Reshaper ): \"\"\"ScalerReshaper is a class that inherits from the Reshaper class and performs additional scaling on the input data.\"\"\" name = \"scalerreshaper\" def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data ) def _get_structured_bias_scale ( self , dtype : np . dtype = None ) -> Tuple [ dict , dict ]: \"\"\"Get the bias and scale values for each field of a structured array. Args: dtype (np.dtype, optional): (Default value = None) Returns: Tuple[dict, dict]: A tuple of two dictionaries, the first containing the bias values for each field and the second Note: If the bias and scale attributes are floats, they will be used for all fields. 
Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper._get_structured_bias_scale(np.dtype([('a', float), ('b', float)])) ({'a': 10, 'b': 10}, {'a': 2, 'b': 2}) \"\"\" bias = self . bias if isinstance ( self . bias , float ): bias = { n : self . bias for n in dtype . names } scale = self . scale if isinstance ( self . scale , float ): scale = { n : self . scale for n in dtype . names } return bias , scale def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . dtype . names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs ) def prepare_output_structured_data ( self , data : np . ndarray = None , * args , ** kwargs ) -> np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' (n0, prod(n1, ..., nm)) Parameters: Name Type Description Default bias float (Default value = 0.0) 0.0 scale float (Default value = 1.0) 1.0 channels_last bool (Default value = False) False Source code in simulai/io.py 345 346 347 348 349 350 351 352 353 354 355 356 357 358 def __init__ ( self , bias : float = 0.0 , scale : float = 1.0 , channels_last : bool = False ) -> None : \"\"\"Reshaper converts n-dimensional arrays to two-dimensional ones, performing a simple reshaping operation F: (n0, n1, ..., nm) -> (n0, prod(n1, ..., nm)) Args: bias (float, optional): (Default value = 0.0) scale (float, optional): (Default value = 1.0) channels_last (bool, optional): (Default value = False) \"\"\" super () . __init__ ( channels_last = channels_last ) self . bias = bias self . scale = scale","title":"__init__()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_input_data","text":"Prepare the input data by subtracting the bias and scaling the data. 
Parameters: Name Type Description Default data Union [ ndarray , recarray ] The input data to be prepared (Default value = None) None *args () **kwargs {} Returns: Type Description ndarray np.ndarray: The prepared input data Note If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) Source code in simulai/io.py 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 def prepare_input_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the input data by subtracting the bias and scaling the data. Args: data (Union[np.ndarray, np.recarray], optional): The input data to be prepared (Default value = None) *args: **kwargs: Returns: np.ndarray: The prepared input data Note: If the input data is a structured array, the method 'prepare_input_structured_data' will be called instead. Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_input_data(np.array([1, 2, 3])) array([-4.5, -3.5, -2.5]) \"\"\" if data . dtype . names is None : return super ( ScalerReshaper , self ) . prepare_input_data ( ( data - self . bias ) / self . scale , * args , ** kwargs ) else : return self . prepare_input_structured_data ( data , * args , ** kwargs )","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_input_structured_data","text":"Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data recarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () Returns: Type Description ndarray np.ndarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) Source code in simulai/io.py 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 def prepare_input_structured_data ( self , data : np . recarray = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.recarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs Returns: np.ndarray: Note: The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[(\"a\", int), (\"b\", int), (\"c\", int)]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_input_structured_data(data) array([[-0.5, 0.33333333, 0.75 ], [ 1.5, 1.66666667, 2. ]]) \"\"\" bias , scale = self . _get_structured_bias_scale ( data . dtype ) data = data . copy () names = data . 
dtype . names for name in names : data [ name ] = ( data [ name ] - bias [ name ]) / scale [ name ] return super ( ScalerReshaper , self ) . prepare_input_structured_data ( data , * args , ** kwargs )","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_output_data","text":"Prepare the output data by scaling it and adding the bias. Parameters: Name Type Description Default data Union [ ndarray , recarray ] The output data to be prepared (Default value = None) None *args () Returns: Type Description ndarray np.ndarray: The prepared output data Note If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) Source code in simulai/io.py 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 def prepare_output_data ( self , data : Union [ np . ndarray , np . recarray ] = None , * args , ** kwargs ) -> np . ndarray : \"\"\"Prepare the output data by scaling it and adding the bias. Args: data (Union[np.ndarray, np.recarray], optional): The output data to be prepared (Default value = None) *args: **kwargs Returns: np.ndarray: The prepared output data Note: If the input data is a structured array, the method 'prepare_output_structured_data' will be called Example: >>> reshaper = ScalerReshaper(bias=10, scale=2) >>> reshaper.prepare_output_data(np.array([1, 2, 3])) array([12., 14., 16.]) \"\"\" if not self . _is_recarray : return super ( ScalerReshaper , self ) . prepare_output_data ( data * self . scale + self . bias , * args , ** kwargs ) else : return self . prepare_output_structured_data ( data )","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.ScalerReshaper.prepare_output_structured_data","text":"Scale and reshape structured data (np.recarray) before passing it to the next layer. Parameters: Name Type Description Default data ndarray structured data to be transformed (Default value = None) None *args Additional arguments passed to the parent class () **kwargs {} Returns: Type Description recarray np.recarray: Note The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. ]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' np . recarray : \"\"\"Scale and reshape structured data (np.recarray) before passing it to the next layer. Args: data (np.ndarray, optional): structured data to be transformed (Default value = None) *args (Additional arguments passed to the parent class): **kwargs: Returns: np.recarray: Note: - The bias and scale parameters are expected to be provided in the form of dictionaries, where keys are field names and values are the corresponding bias and scale values for those fields. Example: >>> data = np.array([[-0.5, 0.33333333, 0.75 ], >>> [ 1.5, 1.66666667, 2. 
]]) >>> reshaper = ScalerReshaper(bias={'a': 1, 'b': 2, 'c': 3}, scale={'a': 2, 'b': 3, 'c': 4}) >>> reshaper.prepare_output_structured_data(data) rec.array([(0., 2., 6.), (6., 8., 12.)], dtype=[('a', ' data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. This class is useful for datasets in which there are invalid data. Source code in simulai/io.py 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 class MapValid ( Reshaper ): \"\"\"MapValid is a reshaper class that converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) where n_valids is the number of valid elements in the data array. This class is useful for datasets in which there are invalid data. \"\"\" name = \"map_valid\" def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . channels_last = channels_last def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . 
where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... ] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ] def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data ) def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data )","title":"MapValid"},{"location":"simulai_io/#simulai.io.MapValid.__init__","text":"Initialize the MapValid class with the configurations and mask passed as parameters. 
Parameters: Name Type Description Default config dict configurations dictionary, by default None None mask ( int , NaN , inf , optional ) mask to select the invalid values, by default None None channels_last bool if set to True, move the channel dimension to the last, by default True True Source code in simulai/io.py 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 def __init__ ( self , config : dict = None , mask = None , channels_last : bool = True ) -> None : \"\"\"Initialize the MapValid class with the configurations and mask passed as parameters. Args: config (dict, optional): configurations dictionary, by default None mask (int, np.NaN, np.inf, optional, optional): mask to select the invalid values, by default None channels_last (bool, optional): if set to True, move the channel dimension to the last, by default True \"\"\" super () . __init__ () self . default_dtype = \"float64\" if mask == 0 or isinstance ( mask , int ): self . replace_mask_with_large_number = False else : self . replace_mask_with_large_number = True self . return_the_same_mask = True for key , value in config . items (): setattr ( self , key , value ) # Default value for very large numbers self . large_number = 1e15 if not mask or self . replace_mask_with_large_number : self . mask = self . large_number else : self . mask = mask self . mask_ = mask for key , value in config . items (): setattr ( self , key , value ) self . valid_indices = None self . original_dimensions = None self . channels_last = channels_last","title":"__init__()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_input_data","text":"Internal input data preparer, executed for each label of the structured array Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) Source code in simulai/io.py 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 def prepare_input_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Internal input data preparer, executed for each label of the structured array Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - MapValid converts n-dimensional arrays to two-dimensional ones performing a valid values mapping operation F: F: data.shape = (n0, n1, ..., nm) -> data'.shape = (n0, n_valids) n_valids = dim([k in data[0, ...] if k != mask]) - WARNING: the invalid positions are expected to be static in relation to n0. Example: >>> data = np.array([[1, 2, 3, 4], [5, 6, 7, 8], [9, 10, 11, 12]]) >>> prepare_input_data(data) array([[1, 2, 3], [5, 6, 7], [9, 10, 11]]) \"\"\" data = super ( MapValid , self ) . prepare_input_data ( data ) if self . mask == self . large_number : self . valid_indices_ = np . where ( data [ 0 , ... ] < self . mask ) elif not str ( self . mask ) . isnumeric () or isinstance ( self . 
mask , int ): self . valid_indices_ = np . where ( data [ 0 , ... ] != self . mask ) else : raise Exception ( \"The chosen mask {} does not fit in any supported case\" . format ( self . mask ) ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ return data [ valid_indices ]","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_input_structured_data","text":"This function is used to prepare structured input data for further processing. Parameters: Name Type Description Default data recarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 def prepare_input_structured_data ( self , data : np . recarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured input data for further processing. Args: data (np.recarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_input_data' function internally. Example: >>> import numpy as np >>> data = np.array([(1, 2, 3), (4, 5, 6)], dtype=[('a', int), ('b', int), ('c', int)]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_input_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_input_data ( data )","title":"prepare_input_structured_data()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_output_data","text":"Prepare output data for the MapValid operation. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], Source code in simulai/io.py 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 def prepare_output_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Prepare output data for the MapValid operation. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: - The reshaped data will have shape (n0, n_valids) where n0 is the number of samples and n_valids are the number of valid values in the data. - If the return_the_same_mask attribute is set to True, the mask used to select the invalid values will be returned. Otherwise, the reshaped data will be filled with NaN. 
Example: >>> import numpy as np >>> reshaper = MapValid() >>> data = np.array([[[1, 2, 3], [4, 5, 6]], [[7, 8, 9], [10, 11, 12]]]) >>> reshaper.prepare_output_data(data) array([[[ 1., 2., 3.], [ 4., 5., 6.]], \"\"\" immutable_shape = data . shape [ 0 ] final_shape = ( immutable_shape , self . n_features , ) if self . return_the_same_mask : mask = self . mask_ else : mask = np . NaN # For practical purposes reshaped_data = np . full ( final_shape , mask ) if not reshaped_data . dtype . type == self . default_dtype : reshaped_data = reshaped_data . astype ( self . default_dtype ) samples_dim = data . shape [ 0 ] valid_indices = ( slice ( 0 , samples_dim ),) + self . valid_indices_ reshaped_data [ valid_indices ] = data reshaped_data = super ( MapValid , self ) . prepare_output_data ( reshaped_data ) return reshaped_data","title":"prepare_output_data()"},{"location":"simulai_io/#simulai.io.MapValid.prepare_output_structured_data","text":"This function is used to prepare structured output data for further processing. Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description ndarray np.ndarray: Note This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) Source code in simulai/io.py 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 def prepare_output_structured_data ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"This function is used to prepare structured output data for further processing. Args: data (np.ndarray, optional): (Default value = None) Returns: np.ndarray: Note: This function is a wrapper function that calls the 'prepare_output_data' function internally. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6]]) >>> model = MapValid() >>> prepared_data = MapValid.prepare_output_structured_data(data) >>> prepared_data array([[1, 2, 3], [4, 5, 6]]) \"\"\" return self . prepare_output_data ( data )","title":"prepare_output_structured_data()"},{"location":"simulai_io/#sampling","text":"Bases: DataPreparer This class is used for sampling data from the input dataset. Source code in simulai/io.py 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 class Sampling ( DataPreparer ): \"\"\"This class is used for sampling data from the input dataset.\"\"\" name = \"sampling\" def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . 
choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None @property def indices ( self ) -> list : \"\"\"Returns the indices of the data that have been sampled. Returns: list: The indices of the data that have been sampled. Raises: AssertionError: If the indices have not been generated yet. Note: The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1] \"\"\" assert self . sampled_indices is not None , ( \"The indices still were not generate.\" \"Run prepare_input_data or prepare_input_structured_data for getting them.\" ) return sorted ( self . sampled_indices . tolist ()) def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ] def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. - When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . 
global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data","title":"Sampling "},{"location":"simulai_io/#simulai.io.Sampling.indices","text":"Returns the indices of the data that have been sampled. Returns: Name Type Description list list The indices of the data that have been sampled. Raises: Type Description AssertionError If the indices have not been generated yet. Note The indices are generated by calling the 'prepare_input_data' or 'prepare_input_structured_data' functions. Example: >>> import numpy as np >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> sampler = Sampling(choices_fraction=0.5, shuffling=True) >>> sampler.prepare_input_data(data) >>> sampler.indices [0, 1]","title":"indices"},{"location":"simulai_io/#simulai.io.Sampling.__init__","text":"Initializes the Sampling class. Parameters: Name Type Description Default choices_fraction float The fraction of the dataset to be sampled, by default 0.1 0.1 shuffling bool Whether to shuffle the data before sampling, by default False False Source code in simulai/io.py 711 712 713 714 715 716 717 718 719 720 721 722 723 724 def __init__ ( self , choices_fraction : float = 0.1 , shuffling : bool = False ) -> None : \"\"\"Initializes the Sampling class. Args: choices_fraction (float, optional): The fraction of the dataset to be sampled, by default 0.1 shuffling (bool, optional): Whether to shuffle the data before sampling, by default False \"\"\" super () . __init__ () self . choices_fraction = choices_fraction self . shuffling = shuffling self . global_indices = None self . sampled_indices = None","title":"__init__()"},{"location":"simulai_io/#simulai.io.Sampling.prepare_input_data","text":"Prepare input data for sampling. Parameters: Name Type Description Default data ndarray The input data. Default is None. None data_interval list The interval of data that should be selected. Default is None, None Returns: Type Description ndarray numpy.ndarray: The sampled data. Note: The data_interval parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) Source code in simulai/io.py 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 def prepare_input_data ( self , data : np . ndarray = None , data_interval : list = None ) -> np . 
ndarray : \"\"\"Prepare input data for sampling. Args: data (np.ndarray, optional): The input data. Default is None. data_interval (list, optional): The interval of data that should be selected. Default is None, Returns: numpy.ndarray: The sampled data. Note: The `data_interval` parameter must be a list of two integers, specifying the start and end of the interval. Example: >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> data_interval = [3, 7] >>> input_data = sampler.prepare_input_data(data, data_interval) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_choices = int ( self . choices_fraction * n_samples ) self . sampled_indices = self . global_indices . copy () if self . shuffling : np . random . shuffle ( self . sampled_indices ) else : self . sampled_indices = self . sampled_indices self . sampled_indices = np . random . choice ( self . sampled_indices , n_choices ) return data [ self . sampled_indices ]","title":"prepare_input_data()"},{"location":"simulai_io/#simulai.io.Sampling.prepare_input_structured_data","text":"Prepares structured data for further processing. Parameters: Name Type Description Default data Dataset Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) None data_interval list The interval of the data to be prepared, the default shape is [0, data.shape[0]] None batch_size int The size of the batches to be processed, defaults to None None dump_path str (Default value = None) None Returns: Type Description recarray np.recarray: Note The features dimensions of the input data should be 1 in NumPy structured arrays. When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) Source code in simulai/io.py 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 def prepare_input_structured_data ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , ) -> np . recarray : \"\"\"Prepares structured data for further processing. Args: data (h5py.Dataset, optional): Structured array to be prepared, the default shape is (n_samples, 1, *other_dimensions) data_interval (list, optional): The interval of the data to be prepared, the default shape is [0, data.shape[0]] batch_size (int, optional): The size of the batches to be processed, defaults to None dump_path (str, optional): (Default value = None) Returns: np.recarray: Note: - The features dimensions of the input data should be 1 in NumPy structured arrays. 
- When using a h5py.Dataset as input, a dump_path must be provided Example: >>> data = h5py.File(\"path/to/data.h5\", 'r')['data'] >>> data_interval = [0, data.shape[0]] >>> batch_size = 32 >>> dump_path = \"path/to/dump.h5\" >>> obj = PrepareInputStructuredData() >>> prepared_data = obj.prepare_input_structured_data(data, data_interval, batch_size, dump_path) \"\"\" if data_interval is None : data_interval = [ 0 , data . shape [ 0 ]] n_samples = data_interval [ 1 ] - data_interval [ 0 ] self . global_indices = np . arange ( start = data_interval [ 0 ], stop = data_interval [ 1 ]) n_sampled_preserved = int ( self . choices_fraction * n_samples ) self . sampled_indices = np . random . choice ( self . global_indices , n_sampled_preserved , replace = False ) if isinstance ( data , h5py . Dataset ): if isinstance ( batch_size , MemorySizeEval ): batch_size = batch_size ( max_batches = n_sampled_preserved , shape = data . shape [ 1 :] ) else : pass assert ( dump_path ), \"Using a h5py.Dataset as input data a dump_path must be provided.\" fp = h5py . File ( dump_path , \"w\" ) sampled_data = fp . create_dataset ( \"data\" , shape = ( n_sampled_preserved ,) + data . shape [ 1 :], dtype = data . dtype ) # Constructing the normalization using the reference data batches = indices_batchdomain_constructor ( indices = self . sampled_indices , batch_size = batch_size ) start_ix = 0 for batch_id , batch in enumerate ( batches ): print ( f \"Sampling batch { batch_id + 1 } / { len ( batches ) } batch_size= { len ( batch ) } \" ) finish_ix = start_ix + len ( batch ) sampled_data [ start_ix : finish_ix ] = data [ sorted ( batch )] start_ix = finish_ix if self . shuffling : random . shuffle ( sampled_data ) else : raise Exception ( \"Others cases are still not implemented.\" ) return sampled_data","title":"prepare_input_structured_data()"},{"location":"simulai_io/#movingwindow","text":"MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) Source code in simulai/io.py 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 class MovingWindow : r \"\"\"MovingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. 
See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon ---- skip Example: >>> import numpy as np >>> data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10]) >>> window = MovingWindow(history_size=3, horizon_size=1) >>> window.transform(data) array([[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9], [8, 9, 10]]) \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \" def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . ndarray ( time_series ) def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch def get_last_item ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :] def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . 
history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"MovingWindow"},{"location":"simulai_io/#simulai.io.MovingWindow.__call__","text":"Apply Moving Window over the input data Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note It is expected that the input_data and output_data have the same shape This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) Source code in simulai/io.py 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Apply Moving Window over the input data Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple of np.ndarray: The tuple contains two arrays with shapes (n_samples, n_history, n_features) and Note: - It is expected that the input_data and output_data have the same shape - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12], [13, 14, 15]]) >>> mw = MovingWindow(history_size=2, horizon_size=2, skip_size=1) >>> input_data, output_data = mw(data, data) >>> input_data array([[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]]) >>> output_data array([[[ 7, 8, 9], [10, 11, 12]], [[10, 11, 12], [13, 14, 15]]]) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . horizon_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center : center + self . horizon_size , :] input_batches_list . append ( input_batch ) output_batches_list . 
append ( self . process_batch ( batch = output_batch )) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"__call__()"},{"location":"simulai_io/#simulai.io.MovingWindow.__init__","text":"Initializes the MovingWindow class Parameters: Name Type Description Default history_size int the size of the history window, by default None None skip_size int the number of steps to skip between windows, by default 1 1 horizon_size int the size of the horizon window, by default None None full_output bool flag to use the full output or only the last item, by default True True Source code in simulai/io.py 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 def __init__ ( self , history_size : int = None , skip_size : int = 1 , horizon_size : int = None , full_output : bool = True , ) -> None : r \"\"\"Initializes the MovingWindow class Args: history_size (int, optional): the size of the history window, by default None skip_size (int, optional): the number of steps to skip between windows, by default 1 horizon_size (int, optional): the size of the horizon window, by default None full_output (bool, optional): flag to use the full output or only the last item, by default True \"\"\" self . history_size = history_size self . skip_size = skip_size self . horizon_size = horizon_size self . full_output = full_output if self . full_output == True : self . process_batch = self . bypass else : self . process_batch = self . get_last_item # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert ( horizon_size ), f \"A value for horizon_size must be provided, not { horizon_size } \"","title":"__init__()"},{"location":"simulai_io/#simulai.io.MovingWindow.bypass","text":"Does nothing, returns the input batch. Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: the input array Source code in simulai/io.py 955 956 957 958 959 960 961 962 963 964 965 def bypass ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Does nothing, returns the input batch. Args: batch (np.ndarray): Returns: np.ndarray: the input array \"\"\" return batch","title":"bypass()"},{"location":"simulai_io/#simulai.io.MovingWindow.get_last_item","text":"Get the last item of a batch Parameters: Name Type Description Default batch ndarray required Returns: Type Description ndarray np.ndarray: Note This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) Source code in simulai/io.py 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 def get_last_item ( self , batch : np . ndarray ) -> np . ndarray : r \"\"\"Get the last item of a batch Args: batch (np.ndarray): Returns: np.ndarray: Note: - This method is used internally by the MovingWindow class Example: >>> data = np.array([[1, 2, 3], [4, 5, 6], [7, 8, 9]]) >>> mw.get_last_item(data) array([[7, 8, 9]]) \"\"\" return batch [ - 1 :]","title":"get_last_item()"},{"location":"simulai_io/#simulai.io.MovingWindow.transform","text":"Applies the moving window over the time_series array. 
Parameters: Name Type Description Default time_series ndarray required Returns: Type Description ndarray np.ndarray: the transformed array with the windows. Source code in simulai/io.py 943 944 945 946 947 948 949 950 951 952 953 def transform ( self , time_series : np . ndarray ) -> np . ndarray : r \"\"\"Applies the moving window over the time_series array. Args: time_series (np.ndarray): Returns: np.ndarray: the transformed array with the windows. \"\"\" return np . ndarray ( time_series )","title":"transform()"},{"location":"simulai_io/#slidingwindow","text":"SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: Name Type Description history_size int The number of history samples to include in each window. skip_size int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] Source code in simulai/io.py 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 class SlidingWindow : r \"\"\"SlidingWindow is applied over a time-series array (2D array), and it is used for creating the necessary augmented data used for LSTM networks, replicating the training windows for each sample in the dataset. The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches Attributes: history_size : int The number of history samples to include in each window. skip_size : int The number of samples to skip between each window. Note: - The difference between SlidingWindow and MovingWindow is that here there is no intersection between two sequential batches. See a graphical example: Example: batch n ---------|--- history | horizon batch n+1 ---------|--- history | horizon Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[1, 2, 3], [2, 3, 4], [3, 4, 5], [4, 5, 6], [5, 6, 7], [6, 7, 8], [7, 8, 9]] \"\"\" def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. 
(Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \" def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"SlidingWindow"},{"location":"simulai_io/#simulai.io.SlidingWindow.__call__","text":"Applies a sliding window operation on the given time series and returns the windowed samples. 
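To contrast this with MovingWindow, the plain NumPy sketch below pairs each input window with an output window shifted forward by skip_size rows, so that consecutive input windows do not overlap when skip_size equals history_size (an illustrative sketch; the array and the sizes are assumptions):
>>> import numpy as np
>>> series = np.arange(12).reshape(6, 2)   # (n_timesteps, n_features)
>>> history_size, skip_size = 2, 2
>>> inputs, targets = [], []
>>> center = history_size
>>> while center + skip_size <= series.shape[0]:
...     inputs.append(series[center - history_size:center, :])
...     targets.append(series[center - history_size + skip_size:center + skip_size, :])
...     center += skip_size
>>> np.stack(inputs, 0).shape, np.stack(targets, 0).shape
((2, 2, 2), (2, 2, 2))
Here the first input window covers rows 0-1 and the second covers rows 2-3, with no intersection between them, while each target window is simply the corresponding input window shifted forward by skip_size rows.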
Parameters: Name Type Description Default input_data ndarray 2D array (time-series) to be used for constructing the history size (Default value = None) None output_data ndarray (Default value = None) None Returns: Type Description Tuple [ ndarray , ndarray ] Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note history_size and horizon_size should be positive integers history_size should be less than the length of input_data input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) Source code in simulai/io.py 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 def __call__ ( self , input_data : np . ndarray = None , output_data : np . ndarray = None ) -> Tuple [ np . ndarray , np . ndarray ]: r \"\"\"Applies a sliding window operation on the given time series and returns the windowed samples. Args: input_data (np.ndarray, optional): 2D array (time-series) to be used for constructing the history size (Default value = None) output_data (np.ndarray, optional): (Default value = None) Returns: Tuple[np.ndarray, np.ndarray]: tuple of np.ndarray with shapes (n_samples, n_history, n_features) and (n_samples, n_horizon, n_features) Note: - history_size and horizon_size should be positive integers - history_size should be less than the length of input_data - input_data and output_data should have the same number of rows Example: >>> data = np.random.rand(10,3) >>> history_size = 3 >>> horizon_size = 2 >>> window = Window(history_size, horizon_size) >>> input_data, output_data = window(data) >>> input_data.shape (4, 3, 3) >>> output_data.shape (4, 2, 3) \"\"\" # It is expected series_data to be a set of time-series with shape # (n_timesteps, n_variables) input_batches_list = list () output_batches_list = list () data_size = input_data . shape [ 0 ] assert input_data . shape [ 0 ] == output_data . shape [ 0 ] center = self . history_size # Loop for covering the entire time-series dataset constructing the # training windows while center + self . skip_size <= data_size : input_batch = input_data [ center - self . history_size : center , :] output_batch = output_data [ center - self . history_size + self . skip_size : center + self . skip_size , : ] input_batches_list . append ( input_batch ) output_batches_list . append ( output_batch ) center += self . skip_size input_data = np . stack ( input_batches_list , 0 ) output_data = np . stack ( output_batches_list , 0 ) return input_data , output_data","title":"__call__()"},{"location":"simulai_io/#simulai.io.SlidingWindow.__init__","text":"Initialize the SlidingWindow object. Parameters: Name Type Description Default history_size int The number of history samples to include in each window. (Default value = None) None skip_size int The number of samples to skip between each window. 
(Default value = None) None Source code in simulai/io.py 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 def __init__ ( self , history_size : int = None , skip_size : int = None ) -> None : r \"\"\"Initialize the SlidingWindow object. Args: history_size (int, optional): The number of history samples to include in each window. (Default value = None) skip_size (int, optional): The number of samples to skip between each window. (Default value = None) \"\"\" self . history_size = history_size self . skip_size = skip_size # Verifying if history and horizon sizes was provided assert ( history_size ), f \"A value for history_size must be provided, not { history_size } \" assert skip_size , f \"A value for horizon_size must be provided, not { skip_size } \"","title":"__init__()"},{"location":"simulai_io/#simulai.io.SlidingWindow.apply","text":"Applies the sliding window to the given time series. Parameters: Name Type Description Default time_series List [ int ] required Returns: Type Description List [ List [ int ]] List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] Source code in simulai/io.py 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 def apply ( self , time_series : List [ int ]) -> List [ List [ int ]]: r \"\"\"Applies the sliding window to the given time series. Args: time_series (List[int]): Returns: List[List[int]]: Example: >>> window = SlidingWindow(history_size=3, skip_size=1) >>> time_series = [1, 2, 3, 4, 5, 6, 7, 8, 9] >>> windows = window.apply(time_series) >>> windows [[[1, 2, 3], [4, 5, 6]], [[4, 5, 6], [7, 8, 9]], [[7, 8, 9], [10, 11, 12]]] \"\"\" windowed_samples = [] for i in range ( 0 , len ( time_series ) - self . history_size - self . skip_size + 1 ): window = time_series [ i : i + self . history_size + self . skip_size ] windowed_samples . append ( window ) return windowed_samples","title":"apply()"},{"location":"simulai_io/#intersectingbatches","text":"IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes. Source code in simulai/io.py 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 class IntersectingBatches : r \"\"\"IntersectingBatches is a class that is applied over a time-series array (2D array) to create batches of input data for training or testing purposes.\"\"\" def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. 
(Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . full = full def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m ) def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size <= data_size : input_batch = input_data [ center : center + self . batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ])","title":"IntersectingBatches"},{"location":"simulai_io/#simulai.io.IntersectingBatches.__call__","text":"Applies the batching strategy to the input data. Parameters: Name Type Description Default input_data ndarray (Default value = None) None Returns: Type Description Union [ list , ndarray ] Union[list, np.ndarray]: A list of batches or a single batch if full attribute is set to False. Note: - If the full attribute is set to True, the last batch will be included even if it's not full. Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] Source code in simulai/io.py 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 def __call__ ( self , input_data : np . ndarray = None ) -> Union [ list , np . ndarray ]: r \"\"\"Applies the batching strategy to the input data. Args: input_data (np.ndarray, optional): (Default value = None) Returns: Union[list, np.ndarray]: A list of batches or a single batch if `full` attribute is set to False. Note: - If the `full` attribute is set to True, the last batch will be included even if it's not full. 
Example: >>> input_data = np.array([[1,2,3], [4,5,6], [7,8,9], [10,11,12]]) >>> batches = IntersectingBatches(skip_size=1, batch_size=2) >>> batches(input_data) [array([[1, 2, 3], [4, 5, 6]]), array([[4, 5, 6], [7, 8, 9]]), array([[ 7, 8, 9], [10, 11, 12]])] \"\"\" input_batches_list = list () data_size = input_data . shape [ 0 ] center = 0 # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size <= data_size : input_batch = input_data [ center : center + self . batch_size ] input_batches_list . append ( input_batch ) center += self . skip_size if self . full == True : return input_batches_list else : return np . vstack ([ item [ - 1 ] for item in input_batches_list ])","title":"__call__()"},{"location":"simulai_io/#simulai.io.IntersectingBatches.__init__","text":"Initializes the IntersectingBatches class Parameters: Name Type Description Default skip_size int Number of samples to skip between two windows. (Default value = 1) 1 batch_size int Number of samples to use in each batch. (Default value = None) None full bool Whether to include the last batch or not, even if it's not full. (Default value = True) True Source code in simulai/io.py 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 def __init__ ( self , skip_size : int = 1 , batch_size : int = None , full : bool = True ) -> None : r \"\"\"Initializes the IntersectingBatches class Args: skip_size (int, optional): Number of samples to skip between two windows. (Default value = 1) batch_size (int, optional): Number of samples to use in each batch. (Default value = None) full (bool, optional): Whether to include the last batch or not, even if it's not full. (Default value = True) \"\"\" assert ( batch_size ), f \"A value for horizon_size must be provided, not { batch_size } \" self . skip_size = skip_size self . batch_size = batch_size self . full = full","title":"__init__()"},{"location":"simulai_io/#simulai.io.IntersectingBatches.get_indices","text":"It gets just the indices of the shifting Parameters: Name Type Description Default dim int total dimension (Default value = None) None Returns: Type Description ndarray np.ndarray: the shifted indices Source code in simulai/io.py 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 def get_indices ( self , dim : int = None ) -> np . ndarray : r \"\"\"It gets just the indices of the shifting Args: dim (int, optional): total dimension (Default value = None) Returns: np.ndarray: the shifted indices \"\"\" center = 0 indices = list () indices_m = list () # Loop for covering the entire time-series dataset constructing the # training windows while center + self . batch_size < dim : index = center + self . batch_size indices . append ( center ) indices_m . append ( index ) center += self . skip_size return np . array ( indices ), np . array ( indices_m )","title":"get_indices()"},{"location":"simulai_io/#batchwiseextrapolation","text":"BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. 
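The feedback mechanism behind this kind of extrapolation can be sketched with a dummy one-step operator: the newest prediction is appended to the current state and the oldest row is dropped, so the operator always sees the latest history_size rows (a simplified illustration; the operator op below is a stand-in for a trained regression model and the sizes are assumptions):
>>> import numpy as np
>>> op = lambda state: state[:, -1:, :] + 1.0     # hypothetical one-step predictor
>>> history_size, n_steps = 3, 5
>>> state = np.zeros((1, history_size, 2))        # (batch, history_size, n_series)
>>> predictions = []
>>> for _ in range(n_steps):
...     new = op(state)                           # predict the next row
...     predictions.append(new[0])
...     state = np.concatenate([state[:, 1:, :], new], axis=1)   # slide the window
>>> np.concatenate(predictions, axis=0).shape
(5, 2)
BatchwiseExtrapolation automates this loop and, when auxiliary forcing data is provided, concatenates it to the state at each step.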
Attributes: Name Type Description time_id int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape Source code in simulai/io.py 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 class BatchwiseExtrapolation : r \"\"\"BatchwiseExtraplation uses a time-series regression model and inputs as generated by MovingWindow to continuously extrapolate a dataset. Attributes: time_id : int Examples:: >>> import numpy as np >>> from sklearn.linear_model import LinearRegression >>> model = LinearRegression() >>> op = lambda state: model.predict(state) >>> auxiliary_data = np.random.rand(100, 10) >>> batchwise_extrapolation = BatchwiseExtrapolation(op=op, auxiliary_data=auxiliary_data) >>> init_state = np.random.rand(1, 10, 20) >>> history_size = 3 >>> horizon_size = 2 >>> testing_data_size = 10 >>> extrapolation_dataset = batchwise_extrapolation(init_state, history_size, horizon_size, testing_data_size) >>> extrapolation_dataset.shape \"\"\" def __init__ ( self , op : callable = None , auxiliary_data : np . ndarray = None ) -> None : self . op = op self . auxiliary_data = auxiliary_data self . time_id = 0 def _simple_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . ndarray : r \"\"\"Given the current extrapolation dataset, use the last history_size number of rows to create the next state of the dataset. Args: extrapolation_dataset (np.ndarray): The current state of the extrapolation dataset. history_size (int, optional): (Default value = 0) Returns: np.ndarray: The next state of the extrapolation dataset. \"\"\" return extrapolation_dataset [ None , - history_size :, :] def _forcing_extrapolation ( self , extrapolation_dataset : np . ndarray , history_size : int = 0 ) -> np . ndarray : return np . hstack ( [ extrapolation_dataset [ - history_size :, :], self . auxiliary_data [ self . time_id - history_size : self . time_id , :], ] )[ None , :, :] def __call__ ( self , init_state : np . ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. 
(Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset","title":"BatchwiseExtrapolation"},{"location":"simulai_io/#simulai.io.BatchwiseExtrapolation.__call__","text":"A function that performs the extrapolation of the time series. Parameters: Name Type Description Default init_state ndarray initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) None history_size int the size of the history window used in the extrapolation. (Default value = None) None horizon_size int the size of the horizon window used in the extrapolation. (Default value = None) None testing_data_size int (Default value = None) None Returns: Type Description ndarray np.ndarray: Note The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) Source code in simulai/io.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 def __call__ ( self , init_state : np . 
ndarray = None , history_size : int = None , horizon_size : int = None , testing_data_size : int = None , ) -> np . ndarray : r \"\"\"A function that performs the extrapolation of the time series. Args: init_state (np.ndarray, optional): initial state of the time series. It should have the shape (batch_size, history_size, n_series) (Default value = None) history_size (int, optional): the size of the history window used in the extrapolation. (Default value = None) horizon_size (int, optional): the size of the horizon window used in the extrapolation. (Default value = None) testing_data_size (int, optional): (Default value = None) Returns: np.ndarray: Note: The number of series in the initial state must be equal to the number of series in the auxiliary data, if it is provided. Example: >>> model = BatchwiseExtrapolation() #Init state of the time series >>> init_state = np.random.random((1,20,3)) >>> history_size = 10 >>> horizon_size = 5 >>> testing_data_size = 50 #Calling the function >>> output = model(init_state, history_size, horizon_size, testing_data_size) >>> print(output.shape) #(50,3) \"\"\" if isinstance ( self . auxiliary_data , np . ndarray ): n_series = self . auxiliary_data . shape [ - 1 ] else : n_series = 0 current_state = init_state extrapolation_dataset = init_state [ 0 , :, n_series :] self . time_id = history_size if isinstance ( self . auxiliary_data , np . ndarray ): assert ( self . auxiliary_data . shape [ - 1 ] + n_series == init_state . shape [ - 1 ] ), \"Number of series in the initial state must be {} \" . format ( self . auxiliary_data . shape [ - 1 ] ) current_state_constructor = self . _forcing_extrapolation else : current_state_constructor = self . _simple_extrapolation while ( extrapolation_dataset . shape [ 0 ] - history_size + horizon_size <= testing_data_size ): extrapolation = self . op ( current_state ) extrapolation_dataset = np . concatenate ( [ extrapolation_dataset , extrapolation [ 0 ]], 0 ) current_state = current_state_constructor ( extrapolation_dataset , history_size = history_size ) log_str = \"Extrapolation {} \" . format ( self . time_id + 1 - history_size ) sys . stdout . write ( \" \\r \" + log_str ) sys . stdout . flush () self . time_id += horizon_size extrapolation_dataset = extrapolation_dataset [ history_size :, :] return extrapolation_dataset","title":"__call__()"},{"location":"simulai_io/#batchcopy","text":"A class for copying data in batches and applying a transformation function. 
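The batched copy pattern can be illustrated directly with h5py (a minimal sketch of the idea, assuming h5py is installed; the file names, batch size and transformation below are placeholders): each batch is read, transformed and written to the destination dataset, so the full array never needs to fit in memory at once.
>>> import h5py
>>> import numpy as np
>>> with h5py.File('source.h5', 'w') as f:
...     _ = f.create_dataset('data', data=np.random.rand(10, 4))
>>> transformation = lambda x: 2.0 * x            # any elementwise map
>>> with h5py.File('source.h5', 'r') as src, h5py.File('copy.h5', 'w') as dst:
...     data = src['data']
...     dset = dst.create_dataset('data', shape=data.shape, dtype=data.dtype)
...     for start in range(0, data.shape[0], 4):  # batch_size = 4
...         dset[start:start + 4] = transformation(data[start:start + 4])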
Source code in simulai/io.py 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 1547 1548 1549 1550 1551 1552 1553 1554 1555 1556 1557 1558 1559 1560 1561 1562 1563 1564 1565 1566 1567 1568 1569 1570 1571 1572 1573 1574 1575 1576 1577 1578 1579 1580 1581 1582 1583 1584 1585 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 class BatchCopy : r \"\"\"A class for copying data in batches and applying a transformation function.\"\"\" def __init__ ( self , channels_last : bool = False ) -> None : self . channels_last = channels_last def _single_copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy data from a single h5py.Dataset to another h5py.Dataset in batches. Args: data (h5py.Dataset, optional): (Default value = None) data_interval (list, optional): The interval of the data to be copied. (Default value = None) batch_size (int, optional): The size of the batch to be copied. (Default value = None) dump_path (str, optional): The path where the new h5py.Dataset will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The new h5py.Dataset after the copy process. Note: - Copy data from data_file.h5/data to data_copy.h5/data with a batch size of 1000: - The input must be an h5py.Dataset. Example: >>> data = h5py.File(\"data_file.h5\", \"r\") >>> batch_copy = BatchCopy() >>> dset = batch_copy._single_copy(data=data[\"data\"], data_interval=[0, 100000], batch_size=1000, dump_path=\"data_copy.h5\") \"\"\" assert isinstance ( data , h5py . Dataset ), \"The input must be h5py.Dataset\" variables_list = data . dtype . names data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def _multiple_copy ( self , data : list = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copy and concatenate multiple h5py.Dataset objects into a single h5py.Dataset object. Args: data (list, optional): A list of h5py.Dataset objects to be concatenated. (Default value = None) data_interval (list, optional): A list of two integers indicating the start and end index of the data to be concatenated. (Default value = None) batch_size (int, optional): The number of samples to be processed at a time. 
(Default value = None) dump_path (str, optional): The file path where the concatenated h5py.Dataset object will be saved. (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The concatenated h5py.Dataset object. \"\"\" assert all ( [ isinstance ( di , h5py . Dataset ) for di in data ] ), \"All inputs must be h5py.Dataset\" variables_list = sum ([ list ( di . dtype . names ) for di in data ], []) data_shape = ( data_interval [ 1 ] - data_interval [ 0 ],) + data [ 0 ] . shape [ 1 :] data_file = h5py . File ( dump_path , \"w\" ) dtype = [( var , \" len ( dset . shape ): chunk_data = np . squeeze ( chunk_data , axis =- 1 ) else : pass dset [ slice ( * d_batch )] = transformation ( chunk_data [ ... ]) return dset def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , )","title":"BatchCopy"},{"location":"simulai_io/#simulai.io.BatchCopy.copy","text":"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Parameters: Name Type Description Default data Dataset input data to be copied (Default value = None) None data_interval list the range of the data to be copied (Default value = None) None batch_size int the size of the batches to be used to copy the data (Default value = None) None dump_path str the path of the file where the data will be copied (Default value = None) None transformation callable (Default value = lambda data: data) lambda : data Returns: Type Description Dataset h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the _multiple_copy function. 
Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) Source code in simulai/io.py 1586 1587 1588 1589 1590 1591 1592 1593 1594 1595 1596 1597 1598 1599 1600 1601 1602 1603 1604 1605 1606 1607 1608 1609 1610 1611 1612 1613 1614 1615 1616 1617 1618 1619 1620 1621 1622 1623 1624 1625 1626 1627 1628 1629 1630 1631 1632 1633 1634 def copy ( self , data : h5py . Dataset = None , data_interval : list = None , batch_size : int = None , dump_path : str = None , transformation : callable = lambda data : data , ) -> h5py . Dataset : r \"\"\"Copies the data from h5py.Dataset to a new h5py.Dataset file. It allows to apply a transformation function to the data. Args: data (h5py.Dataset, optional): input data to be copied (Default value = None) data_interval (list, optional): the range of the data to be copied (Default value = None) batch_size (int, optional): the size of the batches to be used to copy the data (Default value = None) dump_path (str, optional): the path of the file where the data will be copied (Default value = None) transformation (callable, optional): (Default value = lambda data: data) Returns: h5py.Dataset: The copied data Note: - If the data is a list of h5py.Dataset, it will call the `_multiple_copy` function. Example: >>> data = h5py.File('data.h5', 'r') >>> data_interval = [0, 100] >>> batch_size = 1000 >>> dump_path = 'copied_data.h5' >>> transformation = lambda x: x*2 >>> copied_data = copy(data, data_interval, batch_size, dump_path, transformation) \"\"\" if isinstance ( data , list ): return self . _multiple_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , ) else : return self . _single_copy ( data = data , data_interval = data_interval , batch_size = batch_size , dump_path = dump_path , transformation = transformation , )","title":"copy()"},{"location":"simulai_io/#maketensor","text":"This class is used to make torch tensors from numpy arrays or dictionaries. Parameters: Name Type Description Default input_names List [ str ] list of input names. None output_names List [ str ] list of output names. None Note input_tensors will be a list of tensors in case of numpy array and dictionary inputs. The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. The input_data will be converted to float32 dtype. The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. If input_data is None, it will raise an exception. 
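Internally, a 2D array is converted to float32, split into one column tensor per variable along the last axis, and each resulting tensor is set to track gradients; a simplified sketch of that conversion (not the class itself) is:
>>> import numpy as np
>>> import torch
>>> arr = np.random.rand(8, 3)                      # (batch_size, features_size)
>>> tensor = torch.from_numpy(arr.astype('float32'))
>>> columns = list(torch.split(tensor, 1, dim=-1))  # one (8, 1) tensor per variable
>>> for var in columns:
...     var.requires_grad = True                    # enable gradients, as MakeTensor does
>>> len(columns), columns[0].shape
(3, torch.Size([8, 1]))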
Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) Source code in simulai/io.py 1637 1638 1639 1640 1641 1642 1643 1644 1645 1646 1647 1648 1649 1650 1651 1652 1653 1654 1655 1656 1657 1658 1659 1660 1661 1662 1663 1664 1665 1666 1667 1668 1669 1670 1671 1672 1673 1674 1675 1676 1677 1678 1679 1680 1681 1682 1683 1684 1685 1686 1687 1688 1689 1690 1691 1692 1693 1694 1695 1696 1697 1698 1699 1700 1701 1702 1703 1704 1705 1706 1707 1708 1709 1710 1711 1712 1713 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 class MakeTensor : r \"\"\"This class is used to make torch tensors from numpy arrays or dictionaries. Args: input_names (List[str]): list of input names. output_names (List[str]): list of output names. Note: - input_tensors will be a list of tensors in case of numpy array and dictionary inputs. - The input_data should be numpy array with shape (batch_size, features_size) or dictionary with keys from input_names and values with shape (batch_size, features_size) if input_names and output_names are provided. - The input_data will be converted to float32 dtype. - The input_data will be put on the device specified by the device parameter, which defaults to 'cpu'. - If input_data is None, it will raise an exception. Example: # Creating a MakeTensor object with input and output names # Converting numpy array to torch tensor # Converting dictionary to torch tensors >>> mt = MakeTensor(input_names=[\"input_1\", \"input_2\"], output_names=[\"output\"]) >>> input_data = np.random.randn(10, 3) >>> input_tensors = mt(input_data) >>> input_data = {\"input_1\": np.random.randn(10, 3), \"input_2\": np.random.randn(10, 4)} >>> input_tensors = mt(input_data) \"\"\" def __init__ ( self , input_names = None , output_names = None ): self . input_names = input_names self . output_names = output_names def _make_tensor ( self , input_data : np . ndarray = None , device : str = \"cpu\" ) -> List [ torch . Tensor ]: r \"\"\"Convert input_data to a list of torch tensors. Args: input_data (np.ndarray, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: List[torch.Tensor]: list of tensors. \"\"\" inputs_list = list ( torch . split ( input_data , 1 , dim =- 1 )) for vv , var in enumerate ( inputs_list ): var . requires_grad = True var = var . to ( device ) inputs_list [ vv ] = var # var = var[..., None] return inputs_list def _make_tensor_dict ( self , input_data : dict = None , device : str = \"cpu\" ) -> dict : r \"\"\"Convert input_data to a dictionary of torch tensors. Args: input_data (dict, optional): (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: dict: dictionary of tensors. \"\"\" inputs_dict = dict () for key , item in input_data . items (): item . requires_grad = True item = item . to ( device ) inputs_dict [ key ] = item return inputs_dict def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . 
Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" )","title":"MakeTensor"},{"location":"simulai_io/#simulai.io.MakeTensor.__call__","text":"Make tensors from input_data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor , Dict [ str , ndarray ]] input data to be converted. (Default value = None) None device str (Default value = \"cpu\") 'cpu' Returns: Type Description List [ Tensor ] Union[List[torch.Tensor], dict]: Raises: Type Description - Exception Source code in simulai/io.py 1714 1715 1716 1717 1718 1719 1720 1721 1722 1723 1724 1725 1726 1727 1728 1729 1730 1731 1732 1733 1734 1735 1736 1737 1738 1739 1740 1741 1742 1743 1744 1745 1746 1747 1748 1749 1750 1751 1752 1753 1754 def __call__ ( self , input_data : Union [ np . ndarray , torch . Tensor , Dict [ str , np . ndarray ]] = None , device : str = \"cpu\" , ) -> List [ torch . Tensor ]: r \"\"\"Make tensors from input_data. Args: input_data (Union[np.ndarray, torch.Tensor, Dict[str, np.ndarray]], optional): input data to be converted. (Default value = None) device (str, optional): (Default value = \"cpu\") Returns: Union[List[torch.Tensor], dict]: Raises: - Exception: \"\"\" if type ( input_data ) == np . ndarray : input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list if type ( input_data ) == torch . Tensor : inputs_list = self . _make_tensor ( input_data = input_data , device = device ) return inputs_list elif type ( input_data ) == dict : inputs_list = self . _make_tensor_dict ( input_data = input_data , device = device ) return inputs_list else : raise Exception ( f \"The type { type ( input_data ) } for input_data is not supported.\" )","title":"__call__()"},{"location":"simulai_io/#gaussiannoise","text":"Bases: Dataset GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) Source code in simulai/io.py 1757 1758 1759 1760 1761 1762 1763 1764 1765 1766 1767 1768 1769 1770 1771 1772 1773 1774 1775 1776 1777 1778 1779 1780 1781 1782 1783 1784 1785 1786 1787 1788 1789 1790 class GaussianNoise ( Dataset ): r \"\"\"GaussianNoise(stddev=0.01, input_data=None) A dataset that applies Gaussian noise to input data. Example: >>> import numpy as np >>> input_data = np.random.rand(100,100) >>> dataset = GaussianNoise(stddev=0.05, input_data=input_data) >>> dataset.size() (100, 100) \"\"\" def __init__ ( self , stddev : float = 0.01 , input_data : Union [ np . 
ndarray , Tensor ] = None ): super ( Dataset , self ) . __init__ () self . stddev = stddev if isinstance ( input_data , np . ndarray ): input_data_ = torch . from_numpy ( input_data . astype ( \"float32\" )) else : input_data_ = input_data self . input_data = input_data_ self . data_shape = tuple ( self . input_data . shape ) def size ( self ): return self . data_shape def __call__ ( self ): return ( 1 + self . stddev * torch . randn ( * self . data_shape )) * self . input_data","title":"GaussianNoise"},{"location":"simulai_io/#tokenizer","text":"Wrapper for multiple tokenization approaches Source code in simulai/io.py 1792 1793 1794 1795 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 1809 1810 1811 1812 1813 1814 1815 1816 1817 1818 1819 1820 1821 1822 1823 1824 1825 1826 1827 1828 1829 1830 1831 1832 1833 1834 1835 1836 1837 1838 1839 1840 1841 1842 1843 1844 1845 1846 1847 1848 1849 1850 1851 1852 1853 1854 1855 1856 1857 1858 1859 1860 1861 1862 class Tokenizer : \"\"\"Wrapper for multiple tokenization approaches\"\"\" def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" ) def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs ) def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs ) def _make_time_input_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None , step : float = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) step (float): Size of the timestep. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized input dataset. \"\"\" dim = num_step src = np . repeat ( np . expand_dims ( src , axis = 1 ), dim , axis = 1 ) src_shape = src . shape src_shape_list = list ( src_shape ) src_shape_list [ - 1 ] += 1 src_final = np . zeros ( tuple ( src_shape_list )) src_final [:, :, : - 1 ] = src for i in range ( num_step ): src_final [:, i , - 1 ] += step * i return src_final [: - num_step + 1 ] def _make_time_target_sequence ( self , src : Union [ np . ndarray , torch . Tensor ], num_step : int = None ) -> Union [ np . ndarray , torch . Tensor ]: \"\"\"Simple tokenization based on repeating samples and time-indexing them. Args: src (Union[np.ndarray, torch.Tensor]): The dataset to be tokenized. num_step (int): number of timesteps for each batch. (Default value: None) Returns: Union[np.ndarray, torch.Tensor]: The tokenized target dataset. \"\"\" moving_window = MovingWindow ( history_size = 1 , skip_size = 1 , horizon_size = num_step - 1 ) input_data , output_data = moving_window ( input_data = src , output_data = src ) return np . 
concatenate ([ input_data , output_data ], axis = 1 )","title":"Tokenizer"},{"location":"simulai_io/#simulai.io.Tokenizer.__init__","text":"Parameters: Name Type Description Default kind str The kind of tokenization to be used. (Default value = \"time_indexer\") 'time_indexer' Source code in simulai/io.py 1796 1797 1798 1799 1800 1801 1802 1803 1804 1805 1806 1807 1808 def __init__ ( self , kind : str = \"time_indexer\" ): \"\"\" Args: kind (str): The kind of tokenization to be used. (Default value = \"time_indexer\") \"\"\" self . kind = kind # Tokenizer selection if self . kind == \"time_indexer\" : self . input_tokenizer = self . _make_time_input_sequence self . target_tokenizer = self . _make_time_target_sequence else : raise Exception ( f \"The tokenization option { self . kind } is not available.\" )","title":"__init__()"},{"location":"simulai_io/#simulai.io.Tokenizer.generate_input_tokens","text":"Generating the input sequence of tokens. Source code in simulai/io.py 1810 1811 1812 1813 1814 def generate_input_tokens ( self , input_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the input sequence of tokens.\"\"\" return self . input_tokenizer ( input_data , ** kwargs )","title":"generate_input_tokens()"},{"location":"simulai_io/#simulai.io.Tokenizer.generate_target_tokens","text":"Generating the target sequence of tokens. Source code in simulai/io.py 1816 1817 1818 1819 1820 def generate_target_tokens ( self , target_data : Union [ np . ndarray , torch . Tensor ], ** kwargs ) -> torch . Tensor : \"\"\"Generating the target sequence of tokens.\"\"\" return self . target_tokenizer ( target_data , ** kwargs )","title":"generate_target_tokens()"},{"location":"simulai_parallel/","text":"red { color: red } Parallel # PipelineMPI # PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. Source code in simulai/parallel.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 class PipelineMPI : \"\"\"PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. \"\"\" def __init__ ( self , exec : callable = None , extra_params : dict = None , collect : bool = None , show_log : bool = True , ) -> None : self . exec = exec self . show_log = show_log if extra_params is not None : self . extra_params = extra_params else : self . extra_params = {} self . collect = collect self . comm = MPI . COMM_WORLD self . n_procs = self . comm . Get_size () self . status = ( self . n_procs - 1 ) * [ False ] self . status_dict = dict () def _check_kwargs_consistency ( self , kwargs : dict = None ) -> int : \"\"\"It checks if the kwargs provided for each worker have the same length. 
Args: kwargs (dict, optional): a dictionary containing the kwargs of all (Default value = None) Returns: int: Length of the batch sent for each worker. \"\"\" types = [ type ( value ) for value in kwargs . values ()] lengths = [ len ( value ) for value in kwargs . values ()] assert all ([ t == list for t in types ]), ( f \"All the elements in kwargs must be list,\" f \" but received { types } .\" ) assert len ( set ( lengths )) == 1 , ( f \"All the elements in kwargs must be the same length,\" f \" but received { lengths } \" ) print ( \"kwargs is alright.\" ) return lengths [ 0 ] def _split_kwargs ( self , kwargs : dict , rank : int , size : int , total_size : int ) -> Tuple [ dict , int ]: \"\"\"It allows the workload be executed serially in each worker node Args: kwargs (dict): A dictionary containing kwargs, which will be distributed for all the workers. rank (int): The index of the rank. size (int): The number of available workers. total_size (int): The total number of elements to be distributed among the workers. Returns: kwargs_batch: A dictionary containing the kwargs to be sent for each worker. batch_size: The batch size, which corresponds to the number of elements to be sent for each worker. \"\"\" # Decrement rank and size by 1, because they are usually 0-indexed in Python size -= 1 rank -= 1 # Calculate batch size and remainder using divmod() function batch_size , remainder = divmod ( total_size , size ) # If rank is less than remainder, calculate kwargs_batch using batch size + 1 if rank < remainder : kwargs_batch = { key : value [ rank * ( batch_size + 1 ) : ( rank + 1 ) * ( batch_size + 1 )] for key , value in kwargs . items () } return kwargs_batch , batch_size + 1 # If rank is not less than remainder, calculate kwargs_batch using batch size else : kwargs_batch = { key : value [ remainder * ( batch_size + 1 ) + ( rank - remainder ) * batch_size : ( rank - remainder + 1 ) * batch_size ] for key , value in kwargs . items () } return kwargs_batch , batch_size def _attribute_dict_output ( self , dicts : list = None ) -> None : root = dict () for e in dicts : root . update ( e ) for key , value in root . items (): self . status_dict [ key ] = value @staticmethod def inner_type ( obj : list = None ): types_list = [ type ( o ) for o in obj ] assert len ( set ( types_list )) == 1 , \"Composed types are not supported.\" return types_list [ 0 ] def _exec_wrapper ( self , kwargs : dict , total_size : int ) -> None : \"\"\"A wrapper method around exec to facilitate the instantiation of each worker. Args: kwargs (dict): A dictionary containing kwargs for the worker. total_size (int): The total number of elements. \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () size = comm . Get_size () size_ = size # Rank 0 is the 'master' node # The worker nodes execute their workload and send a message to # master if rank != 0 : print ( f \"Executing rank { rank } .\" ) kwargs_batch , batch_size = self . _split_kwargs ( kwargs , rank , size_ , total_size ) kwargs_batch_list = [ { key : value [ j ] for key , value in kwargs_batch . items ()} for j in range ( batch_size ) ] out = list () for i in kwargs_batch_list : print ( f \"Executing batch { i [ 'key' ] } in rank { rank } \" ) # Concatenate the rank to the extra parameters i . update ( self . extra_params ) # Appending the result of the operation self.exec to the partial list out . append ( self . exec ( ** i )) if self . collect is True : msg = out else : msg = 1 if self . 
show_log : print ( f \"Sending the output { msg } to rank 0\" ) comm . send ( msg , dest = 0 ) print ( f \"Execution concluded for rank { rank } .\" ) # The master awaits the responses of each worker node elif rank == 0 : for r in range ( 1 , size ): msg = comm . recv ( source = r ) self . status [ r - 1 ] = msg if self . inner_type ( msg ) == dict : self . _attribute_dict_output ( dicts = msg ) if self . show_log : print ( f \"Rank 0 received { msg } from rank { r } \" ) comm . barrier () @property def success ( self ) -> bool : \"\"\"It returns True if the entire process worked without issues. \"\"\" return all ( self . status ) def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier () success : bool property # It returns True if the entire process worked without issues. run ( kwargs = None ) # It runs the MPI job Parameters: Name Type Description Default kwargs dict A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) None Source code in simulai/parallel.py 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier ()","title":"Simulai parallel"},{"location":"simulai_parallel/#parallel","text":"","title":"Parallel"},{"location":"simulai_parallel/#pipelinempi","text":"PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. 
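A minimal usage sketch follows (an illustration only, assuming mpi4py is installed and the script is launched with something like mpirun -np 4 python script.py; the worker function and its argument names are placeholders, not part of SimulAI): from simulai.parallel import PipelineMPI

def worker(key: str = None, value: float = None) -> dict:
    # Each call processes one element of the kwargs batch; returning a dict lets
    # rank 0 merge the partial results into pipeline.status_dict.
    return {key: value ** 2}

pipeline = PipelineMPI(exec=worker, extra_params={}, collect=True, show_log=False)

# Every entry in kwargs must be a list, and all lists must have the same length;
# the chunks are split among the worker ranks while rank 0 only gathers results.
pipeline.run(kwargs={"key": ["a", "b", "c", "d"], "value": [1.0, 2.0, 3.0, 4.0]})

if pipeline.success:  # True on rank 0 once every worker has reported back
    print(pipeline.status_dict)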
Source code in simulai/parallel.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 class PipelineMPI : \"\"\"PipelineMPI class, it orchestrates the instantiation of MPI jobs and distributes the workload among the workers. \"\"\" def __init__ ( self , exec : callable = None , extra_params : dict = None , collect : bool = None , show_log : bool = True , ) -> None : self . exec = exec self . show_log = show_log if extra_params is not None : self . extra_params = extra_params else : self . extra_params = {} self . collect = collect self . comm = MPI . COMM_WORLD self . n_procs = self . comm . Get_size () self . status = ( self . n_procs - 1 ) * [ False ] self . status_dict = dict () def _check_kwargs_consistency ( self , kwargs : dict = None ) -> int : \"\"\"It checks if the kwargs provided for each worker have the same length. Args: kwargs (dict, optional): a dictionary containing the kwargs of all (Default value = None) Returns: int: Length of the batch sent for each worker. \"\"\" types = [ type ( value ) for value in kwargs . values ()] lengths = [ len ( value ) for value in kwargs . values ()] assert all ([ t == list for t in types ]), ( f \"All the elements in kwargs must be list,\" f \" but received { types } .\" ) assert len ( set ( lengths )) == 1 , ( f \"All the elements in kwargs must be the same length,\" f \" but received { lengths } \" ) print ( \"kwargs is alright.\" ) return lengths [ 0 ] def _split_kwargs ( self , kwargs : dict , rank : int , size : int , total_size : int ) -> Tuple [ dict , int ]: \"\"\"It allows the workload be executed serially in each worker node Args: kwargs (dict): A dictionary containing kwargs, which will be distributed for all the workers. rank (int): The index of the rank. size (int): The number of available workers. total_size (int): The total number of elements to be distributed among the workers. Returns: kwargs_batch: A dictionary containing the kwargs to be sent for each worker. batch_size: The batch size, which corresponds to the number of elements to be sent for each worker. \"\"\" # Decrement rank and size by 1, because they are usually 0-indexed in Python size -= 1 rank -= 1 # Calculate batch size and remainder using divmod() function batch_size , remainder = divmod ( total_size , size ) # If rank is less than remainder, calculate kwargs_batch using batch size + 1 if rank < remainder : kwargs_batch = { key : value [ rank * ( batch_size + 1 ) : ( rank + 1 ) * ( batch_size + 1 )] for key , value in kwargs . items () } return kwargs_batch , batch_size + 1 # If rank is not less than remainder, calculate kwargs_batch using batch size else : kwargs_batch = { key : value [ remainder * ( batch_size + 1 ) + ( rank - remainder ) * batch_size : ( rank - remainder + 1 ) * batch_size ] for key , value in kwargs . 
items () } return kwargs_batch , batch_size def _attribute_dict_output ( self , dicts : list = None ) -> None : root = dict () for e in dicts : root . update ( e ) for key , value in root . items (): self . status_dict [ key ] = value @staticmethod def inner_type ( obj : list = None ): types_list = [ type ( o ) for o in obj ] assert len ( set ( types_list )) == 1 , \"Composed types are not supported.\" return types_list [ 0 ] def _exec_wrapper ( self , kwargs : dict , total_size : int ) -> None : \"\"\"A wrapper method around exec to facilitate the instantiation of each worker. Args: kwargs (dict): A dictionary containing kwargs for the worker. total_size (int): The total number of elements. \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () size = comm . Get_size () size_ = size # Rank 0 is the 'master' node # The worker nodes execute their workload and send a message to # master if rank != 0 : print ( f \"Executing rank { rank } .\" ) kwargs_batch , batch_size = self . _split_kwargs ( kwargs , rank , size_ , total_size ) kwargs_batch_list = [ { key : value [ j ] for key , value in kwargs_batch . items ()} for j in range ( batch_size ) ] out = list () for i in kwargs_batch_list : print ( f \"Executing batch { i [ 'key' ] } in rank { rank } \" ) # Concatenate the rank to the extra parameters i . update ( self . extra_params ) # Appending the result of the operation self.exec to the partial list out . append ( self . exec ( ** i )) if self . collect is True : msg = out else : msg = 1 if self . show_log : print ( f \"Sending the output { msg } to rank 0\" ) comm . send ( msg , dest = 0 ) print ( f \"Execution concluded for rank { rank } .\" ) # The master awaits the responses of each worker node elif rank == 0 : for r in range ( 1 , size ): msg = comm . recv ( source = r ) self . status [ r - 1 ] = msg if self . inner_type ( msg ) == dict : self . _attribute_dict_output ( dicts = msg ) if self . show_log : print ( f \"Rank 0 received { msg } from rank { r } \" ) comm . barrier () @property def success ( self ) -> bool : \"\"\"It returns True if the entire process worked without issues. \"\"\" return all ( self . status ) def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier ()","title":"PipelineMPI"},{"location":"simulai_parallel/#simulai.parallel.PipelineMPI.success","text":"It returns True if the entire process worked without issues.","title":"success"},{"location":"simulai_parallel/#simulai.parallel.PipelineMPI.run","text":"It runs the MPI job Parameters: Name Type Description Default kwargs dict A kwargs dictionary containing chunks of input arguments to be sent for each worker. 
(Default value = None) None Source code in simulai/parallel.py 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 def run ( self , kwargs : dict = None ) -> None : \"\"\"It runs the MPI job Args: kwargs (dict, optional): A kwargs dictionary containing chunks of input arguments to be sent for each worker. (Default value = None) \"\"\" comm = MPI . COMM_WORLD rank = comm . Get_rank () total_size = 0 # Checking if the datasets dimensions are in accordance with the expected ones if rank == 0 : total_size = self . _check_kwargs_consistency ( kwargs = kwargs ) total_size = comm . bcast ( total_size , root = 0 ) comm . barrier () # Executing a wrapper containing the parallelized operation self . _exec_wrapper ( kwargs , total_size ) comm . barrier ()","title":"run()"},{"location":"simulai_residuals/","text":"red { color: red } simulai.residuals # Bases: Module The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: Name Type Description object An instance of the SymbolicOperatorClass. Source code in simulai/residuals/_pytorch_residuals.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 class SymbolicOperator ( torch . nn . 
Module ): \"\"\"The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: object: An instance of the SymbolicOperatorClass. \"\"\" def __init__ ( self , expressions : List [ Union [ sympy . Expr , str ]] = None , input_vars : List [ Union [ sympy . Symbol , str ]] = None , output_vars : List [ Union [ sympy . Symbol , str ]] = None , function : callable = None , gradient : callable = None , keys : str = None , inputs_key = None , constants : dict = None , trainable_parameters : dict = None , external_functions : dict = dict (), processing : str = \"serial\" , device : str = \"cpu\" , engine : str = \"torch\" , auxiliary_expressions : list = None , ) -> None : if engine == \"torch\" : super ( SymbolicOperator , self ) . __init__ () else : pass self . engine = importlib . import_module ( engine ) self . constants = constants if trainable_parameters is not None : self . trainable_parameters = trainable_parameters else : self . trainable_parameters = dict () self . external_functions = external_functions self . processing = processing self . periodic_bc_protected_key = \"periodic\" self . protected_funcs = [ \"cos\" , \"sin\" , \"sqrt\" , \"exp\" ] self . protected_operators = [ \"L\" , \"Div\" , \"Identity\" , \"Kronecker\" ] self . protected_funcs_subs = self . _construct_protected_functions () self . protected_operators_subs = self . _construct_implict_operators () # Configuring the device to be used during the fitting process if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" else : device = \"cuda\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . expressions = [ self . _parse_expression ( expr = expr ) for expr in expressions ] if isinstance ( auxiliary_expressions , dict ): self . auxiliary_expressions = { key : self . _parse_expression ( expr = expr ) for key , expr in auxiliary_expressions . items () } else : self . auxiliary_expressions = auxiliary_expressions self . input_vars = [ self . _parse_variable ( var = var ) for var in input_vars ] self . output_vars = [ self . _parse_variable ( var = var ) for var in output_vars ] self . input_names = [ var . name for var in self . input_vars ] self . output_names = [ var . name for var in self . output_vars ] self . keys = keys if inputs_key != None : self . inputs_key = self . _parse_inputs_key ( inputs_key = inputs_key ) else : self . inputs_key = inputs_key self . all_vars = self . input_vars + self . output_vars if self . inputs_key is not None : self . forward = self . _forward_dict else : self . forward = self . _forward_tensor self . function = function self . diff_symbol = D self . output = None self . f_expressions = list () self . g_expressions = dict () self . feed_vars = None for name in self . output_names : setattr ( self , name , None ) # Defining functions for returning each variable of the regression # function for index , name in enumerate ( self . output_names ): setattr ( self , name , lambda data : self . function . forward ( input_data = data )[ ... , index ][ ... , None ], ) # If no external gradient is provided, use the core gradient evaluator if gradient is None : gradient_function = self . gradient else : gradient_function = gradient subs = { self . diff_symbol . name : gradient_function } subs . 
update ( self . external_functions ) subs . update ( self . protected_funcs_subs ) for expr in self . expressions : if not callable ( expr ): f_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : f_expr = expr self . f_expressions . append ( f_expr ) if self . auxiliary_expressions is not None : for key , expr in self . auxiliary_expressions . items (): if not callable ( expr ): g_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : g_expr = expr self . g_expressions [ key ] = g_expr # Method for executing the expressions evaluation if self . processing == \"serial\" : self . process_expression = self . _process_expression_serial else : raise Exception ( f \"Processing case { self . processing } not supported.\" ) def _construct_protected_functions ( self ): \"\"\"This function creates a dictionary of protected functions from the engine object attribute. Returns: dict: A dictionary of function names and their corresponding function objects. \"\"\" protected_funcs = { func : getattr ( self . engine , func ) for func in self . protected_funcs } return protected_funcs def _construct_implict_operators ( self ): \"\"\"This function creates a dictionary of protected operators from the operators engine module. Returns: dict: A dictionary of operator names and their corresponding function objects. \"\"\" operators_engine = importlib . import_module ( \"simulai.tokens\" ) protected_operators = { func : getattr ( operators_engine , func ) for func in self . protected_operators } return protected_operators def _parse_key_interval ( self , intv : str ) -> List : begin , end = intv . split ( \",\" ) end = int ( end [: - 1 ]) begin = int ( begin ) end = int ( end + 1 ) return np . arange ( begin , end ) . astype ( int ) . tolist () def _parse_inputs_key ( self , inputs_key : str = None ) -> dict : # Sentences separator: '|' sep = \"|\" # Index identifier: ':' inx = \":\" # Interval identifier intv = \"[\" # Removing possible spaces in the inputs_key string inputs_key = inputs_key . replace ( \" \" , \"\" ) try : split_components = inputs_key . split ( sep ) except ValueError : split_components = inputs_key keys_dict = dict () for s in split_components : try : if len ( s . split ( inx )) > 1 : key , index = s . split ( inx ) if not key in keys_dict : keys_dict [ key ] = list () keys_dict [ key ] . append ( int ( index )) else : keys_dict [ key ] . append ( int ( index )) elif len ( s . split ( intv )) > 1 : key , interval_str = s . split ( intv ) interval = self . _parse_key_interval ( interval_str ) keys_dict [ key ] = interval else : raise ValueError except ValueError : keys_dict [ s ] = - 1 return keys_dict def _collect_data_from_inputs_list ( self , inputs_list : dict = None ) -> list : data = list () for k , v in self . inputs_key . items (): if v == - 1 : if inputs_list [ k ] . shape [ 1 ] == 1 : data_ = [ inputs_list [ k ]] else : data_ = list ( torch . split ( inputs_list [ k ], 1 , dim = 1 )) else : data_ = [ inputs_list [ k ][:, i : i + 1 ] for i in v ] data += data_ return data def _parse_expression ( self , expr = Union [ sympy . Expr , str ]) -> sympy . Expr : \"\"\"Parses the input expression and returns a SymPy expression. Args: expr (Union[sympy.Expr, str], optional, optional): The expression to parse, by default None. It can either be a SymPy expression or a string. Returns: sympy.Expr: The parsed SymPy expression. Raises: Exception: If the `constants` attribute is not defined, and the input expression is a string. 
\"\"\" if isinstance ( expr , str ): try : expr_ = sympify ( expr , locals = self . protected_operators_subs , evaluate = False ) if self . constants is not None : expr_ = expr_ . subs ( self . constants ) if self . trainable_parameters is not None : expr_ = expr_ . subs ( self . trainable_parameters ) except ValueError : if self . constants is not None : _expr = expr for key , value in self . constants . items (): _expr = _expr . replace ( key , str ( value )) expr_ = parse_expr ( _expr , evaluate = 0 ) else : raise Exception ( \"It is necessary to define a constants dict.\" ) elif callable ( expr ): expr_ = expr else : if self . constants is not None : expr_ = expr . subs ( self . constants ) else : expr_ = expr return expr_ def _parse_variable ( self , var = Union [ sympy . Symbol , str ]) -> sympy . Symbol : \"\"\"Parse the input variable and return a SymPy Symbol. Args: var (Union[sympy.Symbol, str], optional, optional): The input variable, either a SymPy Symbol or a string. (Default value = Union[sympy.Symbol, str]) Returns: sympy.Symbol: A SymPy Symbol representing the input variable. \"\"\" if isinstance ( var , str ): return sympy . Symbol ( var ) else : return var def _forward_tensor ( self , input_data : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward the input tensor through the function. Args: input_data (torch.Tensor, optional): The input tensor. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( input_data = input_data ) def _forward_dict ( self , input_data : dict = None ) -> torch . Tensor : \"\"\"Forward the input dictionary through the function. Args: input_data (dict, optional): The input dictionary. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( ** input_data ) def _process_expression_serial ( self , feed_vars : dict = None ) -> List [ torch . Tensor ]: \"\"\"Process the expression list serially using the given feed variables. Args: feed_vars (dict, optional): The feed variables. (Default value = None) Returns: List[torch.Tensor]: A list of tensors after evaluating the expressions serially. \"\"\" return [ f ( ** feed_vars ) . to ( self . device ) for f in self . f_expressions ] def _process_expression_individual ( self , index : int = None , feed_vars : dict = None ) -> torch . Tensor : \"\"\"Evaluates a single expression specified by index from the f_expressions list with given feed variables. Args: index (int, optional): Index of the expression to be evaluated, by default None feed_vars (dict, optional): Dictionary of feed variables, by default None Returns: torch.Tensor: Result of evaluating the specified expression with given feed variables \"\"\" return self . f_expressions [ index ]( ** feed_vars ) . to ( self . device ) def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . 
forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars ) def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars ) @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ] def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs ) __call__ ( inputs_data = None ) # Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Parameters: Name Type Description Default inputs_data Union [ ndarray , dict ] Union (Default value = None) None Returns: Name Type Description List [ Tensor ] List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. 
Raises List [ Tensor ] Raises: Type Description does not match with the inputs_key attribute Source code in simulai/residuals/_pytorch_residuals.py 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars ) eval_expression ( key , inputs_list ) # This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. 
Parameters: Name Type Description Default key str the key used to retrieve the expression from the 'g_expressions' attribute required inputs_list list either a list of arrays, an np.ndarray, or a dict containing the inputs to the function required Returns: Type Description the result of evaluating the expression using the inputs.: Source code in simulai/residuals/_pytorch_residuals.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars ) gradient ( feature , param ) staticmethod # Calculates the gradient of the given feature with respect to the given parameter. Parameters: Name Type Description Default feature Tensor Tensor with the input feature. required param Tensor Tensor with the parameter to calculate the gradient with respect to. required Returns: Type Description torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ] jac ( inputs ) # Calculates the Jacobian of the forward function of the model with respect to its inputs. Parameters: Name Type Description Default inputs Tensor Tensor with the input data to the forward function. required Returns: Type Description torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. 
Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs )","title":"Simulai residuals"},{"location":"simulai_residuals/#simulairesiduals","text":"Bases: Module The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: Name Type Description object An instance of the SymbolicOperatorClass. Source code in simulai/residuals/_pytorch_residuals.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 class SymbolicOperator ( torch . nn . 
Module ): \"\"\"The SymbolicOperatorClass is a class that constructs tensor operators using symbolic expressions written in PyTorch. Returns: object: An instance of the SymbolicOperatorClass. \"\"\" def __init__ ( self , expressions : List [ Union [ sympy . Expr , str ]] = None , input_vars : List [ Union [ sympy . Symbol , str ]] = None , output_vars : List [ Union [ sympy . Symbol , str ]] = None , function : callable = None , gradient : callable = None , keys : str = None , inputs_key = None , constants : dict = None , trainable_parameters : dict = None , external_functions : dict = dict (), processing : str = \"serial\" , device : str = \"cpu\" , engine : str = \"torch\" , auxiliary_expressions : list = None , ) -> None : if engine == \"torch\" : super ( SymbolicOperator , self ) . __init__ () else : pass self . engine = importlib . import_module ( engine ) self . constants = constants if trainable_parameters is not None : self . trainable_parameters = trainable_parameters else : self . trainable_parameters = dict () self . external_functions = external_functions self . processing = processing self . periodic_bc_protected_key = \"periodic\" self . protected_funcs = [ \"cos\" , \"sin\" , \"sqrt\" , \"exp\" ] self . protected_operators = [ \"L\" , \"Div\" , \"Identity\" , \"Kronecker\" ] self . protected_funcs_subs = self . _construct_protected_functions () self . protected_operators_subs = self . _construct_implict_operators () # Configuring the device to be used during the fitting process if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" else : device = \"cuda\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . expressions = [ self . _parse_expression ( expr = expr ) for expr in expressions ] if isinstance ( auxiliary_expressions , dict ): self . auxiliary_expressions = { key : self . _parse_expression ( expr = expr ) for key , expr in auxiliary_expressions . items () } else : self . auxiliary_expressions = auxiliary_expressions self . input_vars = [ self . _parse_variable ( var = var ) for var in input_vars ] self . output_vars = [ self . _parse_variable ( var = var ) for var in output_vars ] self . input_names = [ var . name for var in self . input_vars ] self . output_names = [ var . name for var in self . output_vars ] self . keys = keys if inputs_key != None : self . inputs_key = self . _parse_inputs_key ( inputs_key = inputs_key ) else : self . inputs_key = inputs_key self . all_vars = self . input_vars + self . output_vars if self . inputs_key is not None : self . forward = self . _forward_dict else : self . forward = self . _forward_tensor self . function = function self . diff_symbol = D self . output = None self . f_expressions = list () self . g_expressions = dict () self . feed_vars = None for name in self . output_names : setattr ( self , name , None ) # Defining functions for returning each variable of the regression # function for index , name in enumerate ( self . output_names ): setattr ( self , name , lambda data : self . function . forward ( input_data = data )[ ... , index ][ ... , None ], ) # If no external gradient is provided, use the core gradient evaluator if gradient is None : gradient_function = self . gradient else : gradient_function = gradient subs = { self . diff_symbol . name : gradient_function } subs . 
update ( self . external_functions ) subs . update ( self . protected_funcs_subs ) for expr in self . expressions : if not callable ( expr ): f_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : f_expr = expr self . f_expressions . append ( f_expr ) if self . auxiliary_expressions is not None : for key , expr in self . auxiliary_expressions . items (): if not callable ( expr ): g_expr = sympy . lambdify ( self . all_vars , expr , subs ) else : g_expr = expr self . g_expressions [ key ] = g_expr # Method for executing the expressions evaluation if self . processing == \"serial\" : self . process_expression = self . _process_expression_serial else : raise Exception ( f \"Processing case { self . processing } not supported.\" ) def _construct_protected_functions ( self ): \"\"\"This function creates a dictionary of protected functions from the engine object attribute. Returns: dict: A dictionary of function names and their corresponding function objects. \"\"\" protected_funcs = { func : getattr ( self . engine , func ) for func in self . protected_funcs } return protected_funcs def _construct_implict_operators ( self ): \"\"\"This function creates a dictionary of protected operators from the operators engine module. Returns: dict: A dictionary of operator names and their corresponding function objects. \"\"\" operators_engine = importlib . import_module ( \"simulai.tokens\" ) protected_operators = { func : getattr ( operators_engine , func ) for func in self . protected_operators } return protected_operators def _parse_key_interval ( self , intv : str ) -> List : begin , end = intv . split ( \",\" ) end = int ( end [: - 1 ]) begin = int ( begin ) end = int ( end + 1 ) return np . arange ( begin , end ) . astype ( int ) . tolist () def _parse_inputs_key ( self , inputs_key : str = None ) -> dict : # Sentences separator: '|' sep = \"|\" # Index identifier: ':' inx = \":\" # Interval identifier intv = \"[\" # Removing possible spaces in the inputs_key string inputs_key = inputs_key . replace ( \" \" , \"\" ) try : split_components = inputs_key . split ( sep ) except ValueError : split_components = inputs_key keys_dict = dict () for s in split_components : try : if len ( s . split ( inx )) > 1 : key , index = s . split ( inx ) if not key in keys_dict : keys_dict [ key ] = list () keys_dict [ key ] . append ( int ( index )) else : keys_dict [ key ] . append ( int ( index )) elif len ( s . split ( intv )) > 1 : key , interval_str = s . split ( intv ) interval = self . _parse_key_interval ( interval_str ) keys_dict [ key ] = interval else : raise ValueError except ValueError : keys_dict [ s ] = - 1 return keys_dict def _collect_data_from_inputs_list ( self , inputs_list : dict = None ) -> list : data = list () for k , v in self . inputs_key . items (): if v == - 1 : if inputs_list [ k ] . shape [ 1 ] == 1 : data_ = [ inputs_list [ k ]] else : data_ = list ( torch . split ( inputs_list [ k ], 1 , dim = 1 )) else : data_ = [ inputs_list [ k ][:, i : i + 1 ] for i in v ] data += data_ return data def _parse_expression ( self , expr = Union [ sympy . Expr , str ]) -> sympy . Expr : \"\"\"Parses the input expression and returns a SymPy expression. Args: expr (Union[sympy.Expr, str], optional, optional): The expression to parse, by default None. It can either be a SymPy expression or a string. Returns: sympy.Expr: The parsed SymPy expression. Raises: Exception: If the `constants` attribute is not defined, and the input expression is a string. 
\"\"\" if isinstance ( expr , str ): try : expr_ = sympify ( expr , locals = self . protected_operators_subs , evaluate = False ) if self . constants is not None : expr_ = expr_ . subs ( self . constants ) if self . trainable_parameters is not None : expr_ = expr_ . subs ( self . trainable_parameters ) except ValueError : if self . constants is not None : _expr = expr for key , value in self . constants . items (): _expr = _expr . replace ( key , str ( value )) expr_ = parse_expr ( _expr , evaluate = 0 ) else : raise Exception ( \"It is necessary to define a constants dict.\" ) elif callable ( expr ): expr_ = expr else : if self . constants is not None : expr_ = expr . subs ( self . constants ) else : expr_ = expr return expr_ def _parse_variable ( self , var = Union [ sympy . Symbol , str ]) -> sympy . Symbol : \"\"\"Parse the input variable and return a SymPy Symbol. Args: var (Union[sympy.Symbol, str], optional, optional): The input variable, either a SymPy Symbol or a string. (Default value = Union[sympy.Symbol, str]) Returns: sympy.Symbol: A SymPy Symbol representing the input variable. \"\"\" if isinstance ( var , str ): return sympy . Symbol ( var ) else : return var def _forward_tensor ( self , input_data : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward the input tensor through the function. Args: input_data (torch.Tensor, optional): The input tensor. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( input_data = input_data ) def _forward_dict ( self , input_data : dict = None ) -> torch . Tensor : \"\"\"Forward the input dictionary through the function. Args: input_data (dict, optional): The input dictionary. (Default value = None) Returns: torch.Tensor: The output tensor after forward pass. \"\"\" return self . function . forward ( ** input_data ) def _process_expression_serial ( self , feed_vars : dict = None ) -> List [ torch . Tensor ]: \"\"\"Process the expression list serially using the given feed variables. Args: feed_vars (dict, optional): The feed variables. (Default value = None) Returns: List[torch.Tensor]: A list of tensors after evaluating the expressions serially. \"\"\" return [ f ( ** feed_vars ) . to ( self . device ) for f in self . f_expressions ] def _process_expression_individual ( self , index : int = None , feed_vars : dict = None ) -> torch . Tensor : \"\"\"Evaluates a single expression specified by index from the f_expressions list with given feed variables. Args: index (int, optional): Index of the expression to be evaluated, by default None feed_vars (dict, optional): Dictionary of feed variables, by default None Returns: torch.Tensor: Result of evaluating the specified expression with given feed variables \"\"\" return self . f_expressions [ index ]( ** feed_vars ) . to ( self . device ) def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . 
forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars ) def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
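The periodic-boundary branch of `eval_expression` described above evaluates the same auxiliary expression at the two ends of the domain and returns the difference, so the residual vanishes when the boundary values match. A minimal conceptual sketch (`model` and `g` are hypothetical stand-ins, not simulai objects):

```python
import math
import torch

def model(x):
    # Stand-in for the network forward pass; periodic on [0, 1] by construction.
    return torch.sin(2.0 * math.pi * x)

def g(x, u):
    # Hypothetical boundary expression: enforce u(x_lower) == u(x_upper).
    return u

x_lower = torch.zeros(16, 1)
x_upper = torch.ones(16, 1)

residual = g(x_lower, model(x_lower)) - g(x_upper, model(x_upper))
print(residual.abs().max())  # ~0: the periodic condition holds
```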
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars ) @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ] def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs )","title":"simulai.residuals"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.__call__","text":"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Parameters: Name Type Description Default inputs_data Union [ ndarray , dict ] Union (Default value = None) None Returns: Name Type Description List [ Tensor ] List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. 
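The `gradient` static method shown above is a thin wrapper over `torch.autograd.grad` with `create_graph`/`retain_graph` enabled so that higher-order derivatives (needed for PDE residuals) remain available. A standalone sketch of the same call:

```python
import torch
from torch.autograd import grad

t = torch.linspace(0.0, 1.0, 20).reshape(-1, 1).requires_grad_(True)
u = t ** 2                                   # toy "feature" depending on the parameter t

du_dt = grad(
    u, t,
    grad_outputs=torch.ones_like(u),
    create_graph=True,
    retain_graph=True,
    allow_unused=True,
)[0]

print(torch.allclose(du_dt, 2.0 * t))        # True: d(t^2)/dt = 2t
```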
Raises List [ Tensor ] Raises: Type Description does not match with the inputs_key attribute Source code in simulai/residuals/_pytorch_residuals.py 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def __call__ ( self , inputs_data : Union [ np . ndarray , dict ] = None ) -> List [ torch . Tensor ]: \"\"\"Evaluate the symbolic expression. This function takes either a numpy array or a dictionary of numpy arrays as input. Args: inputs_data (Union[np.ndarray, dict], optional): Union (Default value = None) Returns: List[torch.Tensor]: List[torch.Tensor]: A list of tensors containing the evaluated expressions. Raises: Raises: does: not match with the inputs_key attribute \"\"\" constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_data , device = self . device ) output = self . forward ( input_data = inputs_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list )} if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs_list = self . _collect_data_from_inputs_list ( inputs_list = inputs_list ) inputs = { key : value for key , value in zip ( self . input_names , inputs_list )} else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** outputs , ** inputs } # It returns a list of tensors containing the expressions # evaluated over a domain return self . process_expression ( feed_vars = feed_vars )","title":"__call__()"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.eval_expression","text":"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. 
Parameters: Name Type Description Default key str the key used to retrieve the expression from the 'g_expressions' attribute required inputs_list list either a list of arrays, an np.ndarray, or a dict containing the inputs to the function required Returns: Type Description the result of evaluating the expression using the inputs.: Source code in simulai/residuals/_pytorch_residuals.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 def eval_expression ( self , key , inputs_list ): \"\"\"This function evaluates an expression stored in the class attribute 'g_expressions' using the inputs in 'inputs_list'. If the expression has a periodic boundary condition, the function evaluates the expression at the lower and upper boundaries and returns the difference. If the inputs are provided as a list, they are split into individual tensors and stored in a dictionary with the keys as the input names. If the inputs are provided as an np.ndarray, they are converted to tensors and split along the second axis. If the inputs are provided as a dict, they are extracted using the 'inputs_key' attribute. The inputs, along with the outputs obtained from running the function, are then passed as arguments to the expression using the 'g(**feed_vars)' syntax. Args: key (str): the key used to retrieve the expression from the 'g_expressions' attribute inputs_list (list): either a list of arrays, an np.ndarray, or a dict containing the inputs to the function Returns: the result of evaluating the expression using the inputs.: \"\"\" try : g = self . g_expressions . get ( key ) except : raise Exception ( f \"The expression { key } does not exist.\" ) # Periodic boundary conditions if self . periodic_bc_protected_key in key : assert isinstance ( inputs_list , list ), ( \"When a periodic boundary expression is used,\" \" the input must be a list of arrays.\" ) # Lower bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ 0 ], device = self . device ) inputs_L = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_L = { key : value for key , value in zip ( self . output_names , outputs_list ) } feed_vars_L = { ** inputs_L , ** outputs_L } # Upper bound constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) tensors_list = constructor ( input_data = inputs_list [ - 1 ], device = self . device ) inputs_U = { key : value for key , value in zip ( self . input_names , tensors_list ) } output = self . function . forward ( input_data = tensors_list ) output = output . to ( self . device ) # TODO Check if it is necessary outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs_U = { key : value for key , value in zip ( self . 
output_names , outputs_list ) } feed_vars_U = { ** inputs_U , ** outputs_U } # Evaluating the boundaries equality return g ( ** feed_vars_L ) - g ( ** feed_vars_U ) # The non-periodic cases else : constructor = MakeTensor ( input_names = self . input_names , output_names = self . output_names ) inputs_list = constructor ( input_data = inputs_list , device = self . device ) output = self . function . forward ( input_data = inputs_list ) outputs_list = torch . split ( output , 1 , dim =- 1 ) outputs = { key : value for key , value in zip ( self . output_names , outputs_list ) } if type ( inputs_list ) is list : inputs = { key : value for key , value in zip ( self . input_names , inputs_list ) } elif type ( inputs_list ) is np . ndarray : arrays_list = np . split ( inputs_list , inputs_list . shape [ 1 ], axis = 1 ) tensors_list = [ torch . from_numpy ( arr ) for arr in arrays_list ] for t in tensors_list : t . requires_grad = True inputs = { key : value for key , value in zip ( self . input_names , tensors_list ) } elif type ( inputs_list ) is dict : assert ( self . inputs_key is not None ), \"If inputs_list is dict, \\ it is necessary to provide \\ a key.\" inputs = { key : value for key , value in zip ( self . input_names , inputs_list [ self . inputs_key ] ) } else : raise Exception ( f \"Format { type ( inputs_list ) } not supported \\ for inputs_list\" ) feed_vars = { ** inputs , ** outputs } return g ( ** feed_vars )","title":"eval_expression()"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.gradient","text":"Calculates the gradient of the given feature with respect to the given parameter. Parameters: Name Type Description Default feature Tensor Tensor with the input feature. required param Tensor Tensor with the parameter to calculate the gradient with respect to. required Returns: Type Description torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 @staticmethod def gradient ( feature , param ): \"\"\"Calculates the gradient of the given feature with respect to the given parameter. Args: feature (torch.Tensor): Tensor with the input feature. param (torch.Tensor): Tensor with the parameter to calculate the gradient with respect to. Returns: torch.Tensor: Tensor with the gradient of the feature with respect to the given parameter. Example: >>> feature = torch.tensor([1, 2, 3], dtype=torch.float32) >>> param = torch.tensor([2, 3, 4], dtype=torch.float32) >>> gradient(feature, param) tensor([1., 1., 1.], grad_fn=) \"\"\" grad_ = grad ( feature , param , grad_outputs = torch . ones_like ( feature ), create_graph = True , allow_unused = True , retain_graph = True , ) return grad_ [ 0 ]","title":"gradient()"},{"location":"simulai_residuals/#simulai.residuals.SymbolicOperator.jac","text":"Calculates the Jacobian of the forward function of the model with respect to its inputs. Parameters: Name Type Description Default inputs Tensor Tensor with the input data to the forward function. required Returns: Type Description torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. 
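The `jac` method documented above delegates to `torch.autograd.functional.jacobian` applied to the model's forward pass. A minimal standalone sketch with a stand-in forward function:

```python
import torch
from torch.autograd.functional import jacobian

def forward(x):
    # Stand-in for self.forward(input_data=x): an elementwise map.
    return x ** 2

x = torch.tensor([1.0, 2.0, 3.0])
print(jacobian(forward, x))
# tensor([[2., 0., 0.],
#         [0., 4., 0.],
#         [0., 0., 6.]])
```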
Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) Source code in simulai/residuals/_pytorch_residuals.py 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 def jac ( self , inputs ): \"\"\"Calculates the Jacobian of the forward function of the model with respect to its inputs. Args: inputs (torch.Tensor): Tensor with the input data to the forward function. Returns: torch.Tensor: Tensor with the Jacobian of the forward function with respect to its inputs. Example: >>> inputs = torch.tensor([[1, 2, 3], [2, 3, 4]], dtype=torch.float32) >>> jac(inputs) tensor([[1., 1., 1.], [1., 1., 1.]], grad_fn=) \"\"\" def inner ( inputs ): return self . forward ( input_data = inputs ) return jacobian ( inner , inputs )","title":"jac()"},{"location":"simulai_models/simulai_models_autoencoder/","text":"red { color: red } AutoEncoder # AutoencoderMLP # Bases: NetworkTemplate This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: Fully-connected encoder Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 class AutoencoderMLP ( NetworkTemplate ): r \"\"\"This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: - Fully-connected encoder - Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER \"\"\" def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . 
weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict () def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary () def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . numpy () __init__ ( encoder = None , decoder = None , input_dim = None , output_dim = None , latent_dim = None , activation = None , shallow = False , devices = 'cpu' , name = None ) # Initialize the AutoencoderMLP network Parameters: Name Type Description Default encoder DenseNetwork The encoder network architecture. (Default value = None) None decoder DenseNetwork The decoder network architecture. (Default value = None) None input_dim Optional [ int ] The input dimensions of the data, by default None. None output_dim Optional [ int ] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. 
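Given the constructor parameters listed above, a minimal usage sketch of the automatic-generation path (no encoder/decoder passed, so default dense subnetworks are built from the dimensions). The import path and the activation name are assumptions for illustration, not taken from this page:

```python
import numpy as np
from simulai.models import AutoencoderMLP  # import path assumed

autoencoder = AutoencoderMLP(
    input_dim=64,
    latent_dim=8,
    output_dim=64,
    activation="tanh",   # assumed to be an accepted activation identifier
    shallow=True,
    devices="cpu",
)

data = np.random.rand(100, 64).astype("float32")
latent = autoencoder.eval_projection(input_data=data)   # np.ndarray, shape (100, 8)
reconstructed = autoencoder.forward(input_data=data)    # torch.Tensor, shape (100, 64)
```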
None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default \"cpu\". 'cpu' name str The name of the network, by default None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict () eval_projection ( input_data = None ) # Evaluate the projection of the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 168 169 170 171 172 173 174 175 176 177 178 179 180 def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. 
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . numpy () forward ( input_data = None ) # Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed projection ( input_data = None ) # Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent reconstruction ( input_data = None ) # Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed summary () # Prints the summary of the network architecture Source code in simulai/models/_pytorch_models/_autoencoder.py 114 115 116 117 def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary () AutoencoderCNN # Bases: NetworkTemplate This is an implementation of a convolutional autoencoder as Reduced Order Model. An autoencoder architecture consists of three stages: The convolutional encoder The bottleneck stage, subdivided in: Fully-connected encoder Fully connected decoder The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... 
[Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 class AutoencoderCNN ( NetworkTemplate ): r \"\"\"This is an implementation of a convolutional autoencoder as Reduced Order Model. An autoencoder architecture consists of three stages: - The convolutional encoder - The bottleneck stage, subdivided in: - Fully-connected encoder - Fully connected decoder - The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. 
channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . 
bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data ) def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . 
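The `projection`/`reconstruction` pair above flattens the convolutional feature map before the dense bottleneck and restores its shape (`before_flatten_dimension`) before the transposed-convolution decoder. Stripped of the simulai wrappers, the pattern looks roughly like this (layer sizes are illustrative only):

```python
import torch
import torch.nn as nn

encoder = nn.Sequential(nn.Conv2d(1, 4, 3, stride=2, padding=1), nn.ReLU())
bottleneck_encoder = nn.Linear(4 * 16 * 16, 8)
bottleneck_decoder = nn.Linear(8, 4 * 16 * 16)
decoder = nn.ConvTranspose2d(4, 1, 3, stride=2, padding=1, output_padding=1)

x = torch.rand(10, 1, 32, 32)

feats = encoder(x)                                    # (10, 4, 16, 16)
before_flatten = feats.shape[1:]                      # remembered for the decoder
latent = bottleneck_encoder(feats.reshape(10, -1))    # (10, 8)

restored = bottleneck_decoder(latent).reshape(-1, *before_flatten)
reconstructed = decoder(restored)                     # back to (10, 1, 32, 32)
print(reconstructed.shape)
```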
numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () __init__ ( encoder = None , bottleneck_encoder = None , bottleneck_decoder = None , decoder = None , encoder_activation = 'relu' , input_dim = None , output_dim = None , latent_dim = None , kernel_size = None , activation = None , channels = None , case = None , shallow = False , devices = 'cpu' , name = None , ** kwargs ) # Initialize the AutoencoderCNN network. Parameters: Name Type Description Default encoder ConvolutionalNetwork The encoder network architecture, by default None. None bottleneck_encoder Linear The bottleneck encoder network architecture, by default None. None bottleneck_decoder Linear The bottleneck decoder network architecture, by default None. None decoder ConvolutionalNetwork The decoder network architecture, by default None. None encoder_activation str The activation function used by the encoder network, by default 'relu'. 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimensions of the data, by default None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. None kernel_size Optional [ int ] (Default value = None) None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None channels Optional [ int ] The number of channels of the convolutional layers, by default None. None case Optional [ str ] The type of convolutional encoder and decoder to be used, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default 'cpu'. 'cpu' name str The name of the network, by default None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. 
Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () eval ( input_data = None ) # Evaluate the autoencoder on the given dataset. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be evaluated, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. 
Source code in simulai/models/_pytorch_models/_autoencoder.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data ) forward ( input_data ) # Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed project ( input_data = None ) # Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 461 462 463 464 465 466 467 468 469 470 471 472 473 474 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () projection ( input_data ) # Project input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected. required Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent reconstruct ( input_data = None ) # Reconstructs the latent dataset to the original one. 
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be reconstructed. If not provided, uses the original input data, by default None. None Returns: Type Description ndarray np.ndarray: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 476 477 478 479 480 481 482 483 484 485 486 487 488 489 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () reconstruction ( input_data ) # Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed summary ( input_data = None , input_shape = None , verbose = True ) # Prints the summary of the network architecture. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. (Default value = None) None input_shape list The shape of the input data. (Default value = None) None verbose bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . 
forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) AutoencoderKoopman # Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Koopman operator Fully connected decoder The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... 
[Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 class AutoencoderKoopman ( NetworkTemplate ): r \"\"\"This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Koopman operator - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. 
bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . 
add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : if verbose == True : if self . input_dim != None : input_shape = list ( self . input_dim ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () print ( f \"The Koopman Operator has shape: { self . K_op . shape } \" ) self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the bottleneck encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . 
prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the bottleneck decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m )) def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T ) def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . 
reconstruction ( input_data = latent_m ) return reconstructed_m def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy () def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () __init__ ( encoder = None , bottleneck_encoder = None , bottleneck_decoder = None , decoder = None , input_dim = None , output_dim = None , latent_dim = None , activation = None , channels = None , case = None , architecture = None , shallow = False , use_batch_norm = False , encoder_activation = 'relu' , devices = 'cpu' , name = None ) # Constructs a new instance of the Autoencoder Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None input_dim Optional [ Tuple [ int , ...]] The input dimensions. Used for automatic network generation. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions. Used for automatic network generation. Defaults to None. None latent_dim Optional [ int ] The latent dimensions. Used for automatic network generation. Defaults to None. None activation Optional [ Union [ list , str ]] The activation functions for each layer. Used for automatic network generation. Defaults to None. None channels Optional [ int ] The number of channels. Used for automatic network generation. Defaults to None. None case Optional [ str ] The type of problem. Used for automatic network generation. Defaults to None. 
None architecture Optional [ str ] The network architecture. Used for automatic network generation. Defaults to None. None shallow Optional [ bool ] Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. False use_batch_norm Optional [ bool ] (Default value = False) False encoder_activation str The activation function for the encoder. Defaults to \"relu\". 'relu' devices Union [ str , list ] The devices to use. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. 
use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () latent_forward ( input_data = None ) # Evaluates the operation u^{u+1} = K u^{i} Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. 
None Returns: Type Description Tensor torch.Tensor: The computed latent representation. Source code in simulai/models/_pytorch_models/_autoencoder.py 820 821 822 823 824 825 826 827 828 829 830 831 832 def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T ) latent_forward_m ( input_data = None , m = 1 ) # Evaluates the operation $u^{u+m} = K^m u^{i}$ Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The computed latent representation. Source code in simulai/models/_pytorch_models/_autoencoder.py 805 806 807 808 809 810 811 812 813 814 815 816 817 818 def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m )) predict ( input_data = None , n_steps = 1 ) # Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None n_steps int The number of extrapolations to perform. Defaults to 1. 1 Returns: Type Description ndarray np.ndarray: The predicted reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy () project ( input_data = None ) # Projects the input data into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The projected data. 
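The Koopman-specific methods documented above (latent_forward, latent_forward_m, predict) all hinge on one idea: time stepping happens in the latent space through a learned linear operator K, so a single step is z_{i+1} = z_i K^T and an m-step extrapolation amounts to applying K m times, i.e. the matrix power K^m. The sketch below reproduces that rollout with plain PyTorch; the operator, the toy decoder and every dimension are illustrative stand-ins, not the library's own objects.

```python
import torch

torch.manual_seed(0)

latent_dim, n_steps = 8, 5

# Illustrative stand-ins for the trained pieces: a latent Koopman operator K
# (playing the role of K_op above) and a toy decoder head.
K = 0.1 * torch.randn(latent_dim, latent_dim)
decoder = torch.nn.Linear(latent_dim, 3)

z0 = torch.randn(1, latent_dim)            # latent state at step i

# Single-step advance, z_{i+1} = z_i K^T (what latent_forward evaluates)
z1 = z0 @ K.T

# m-step advance by repeated application of K, i.e. z_{i+m} = z_i (K^m)^T
def advance_m(z: torch.Tensor, K: torch.Tensor, m: int) -> torch.Tensor:
    for _ in range(m):
        z = z @ K.T
    return z

z3 = advance_m(z0, K, 3)
# The same result via the explicit matrix power K^3
assert torch.allclose(z3, z0 @ torch.linalg.matrix_power(K, 3).T, atol=1e-6)

# predict-style rollout: extrapolate n_steps in the latent space,
# stack the latent states, and decode them in a single pass.
states, z = [], z0
for _ in range(n_steps):
    z = z @ K.T
    states.append(z)
rollout = torch.vstack(states)             # (n_steps, latent_dim)
reconstructed = decoder(rollout)           # decoded trajectory, (n_steps, 3)
print(reconstructed.shape)
```

The final block mirrors what predict does: extrapolate n_steps in latent space, stack the states with torch.vstack, and decode them at once, which is also the content of the documented operation Ũ_m = D(K^m E(U)).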
Source code in simulai/models/_pytorch_models/_autoencoder.py 902 903 904 905 906 907 908 909 910 911 912 913 914 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () reconstruct ( input_data = None ) # Reconstructs the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () reconstruction_forward ( input_data = None ) # Evaluates the operation \u0168 = D(E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed reconstruction_forward_m ( input_data = None , m = 1 ) # Evaluates the operation \u0168_m = D(K^m E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . reconstruction ( input_data = latent_m ) return reconstructed_m AutoencoderVariational # Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a reduced order model. 
A variational autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Gaussian noise Fully connected decoder The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 class AutoencoderVariational ( NetworkTemplate ): r \"\"\"This is an implementation of a 
Koopman autoencoder as a reduced order model. A variational autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Gaussian noise - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. **kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. 
if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. 
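As the constructor listing above shows, the variational model attaches two linear heads of size latent_dimension x latent_dimension to the latent vector: z_mean for the posterior mean and z_log_var for its log-variance, plus a scale factor used later when sampling. A minimal stand-alone sketch of just those two heads (the dimensions and the fake encoder output are assumptions made for illustration; cf. the Mu and Sigma methods documented further below):

```python
import torch

latent_dim = 16
z_mean = torch.nn.Linear(latent_dim, latent_dim)      # mean head, as registered in __init__ above
z_log_var = torch.nn.Linear(latent_dim, latent_dim)   # log-variance head

latent = torch.randn(4, latent_dim)                    # pretend encoder/bottleneck output, batch of 4

mu = z_mean(latent)                                    # posterior mean (what Mu returns)
std = torch.exp(z_log_var(latent) / 2)                 # standard deviation exp(log_var / 2) (what Sigma returns)
print(mu.shape, std.shape)                             # both torch.Size([4, 16])
```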
Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder and bottleneck encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the bottleneck encoder applied to the input data. Note: This function is used for projection of the input data into the bottleneck space. 
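Both the summary listing and the _projection_with_bottleneck method above rely on the same shape bookkeeping: the convolutional encoder output is flattened to (-1, prod(shape[1:])) before entering the dense bottleneck, and the bottleneck decoder output is reshaped back to the stored before_flatten_dimension before reaching the transposed-convolution decoder. A small sketch of that round trip with assumed toy shapes (the Linear layers stand in for the bottleneck networks):

```python
import numpy as np
import torch

# Toy shapes standing in for the convolutional encoder output (illustrative only)
batch, channels, height, width = 2, 8, 4, 4
conv_features = torch.randn(batch, channels, height, width)

# Flatten everything but the batch axis before the dense bottleneck,
# mirroring: reshape((-1, np.prod(shape[1:])))
before_flatten_dimension = tuple(conv_features.shape[1:])
flat = conv_features.reshape((-1, int(np.prod(before_flatten_dimension))))   # (2, 128)

bottleneck_encoder = torch.nn.Linear(flat.shape[-1], 16)   # dense projection to the latent space
latent = bottleneck_encoder(flat)                          # (2, 16)

# On the way back, the bottleneck decoder output is reshaped to the pre-flatten
# dimensions before it enters the transposed-convolution decoder.
bottleneck_decoder = torch.nn.Linear(16, flat.shape[-1])
restored = bottleneck_decoder(latent).reshape((-1,) + before_flatten_dimension)
print(restored.shape)                                      # torch.Size([2, 8, 4, 4])
```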
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection_with_bottleneck(input_data=input_data) \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the encoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection(input_data=input_data) \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the bottleneck decoder and decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the bottleneck decoder and decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the bottleneck decoder's output. Note: This function is used for reconstruction of the input data from the bottleneck space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> bottleneck_output = autoencoder._projection_with_bottleneck(input_data=input_data) >>> output_data = autoencoder._reconstruction_with_bottleneck(input_data=bottleneck_output) \"\"\" bottleneck_output = self . encoder_activation ( ( self . bottleneck_decoder . forward ( input_data = input_data )) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._reconstruction(input_data=input_data) \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. 
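The user-facing convenience methods in this section (project, reconstruct, eval, and the to_numpy flag of Mu) all share one conversion pattern: a NumPy input is cast with astype(ARRAY_DTYPE), wrapped with torch.from_numpy and moved to the model device, and the result is handed back via .cpu().detach().numpy(). The pattern in isolation, with a toy module (float32 is assumed here as a stand-in for the library's ARRAY_DTYPE constant):

```python
import numpy as np
import torch

ARRAY_DTYPE = np.float32                      # assumed stand-in for the library constant
device = torch.device("cpu")

model = torch.nn.Linear(16, 16)               # toy stand-in for a projection/reconstruction head

x_np = np.random.rand(4, 16)                  # NumPy input, as the convenience methods accept
x = torch.from_numpy(x_np.astype(ARRAY_DTYPE)).to(device)
out_np = model(x).cpu().detach().numpy()      # back to NumPy for the caller
print(out_np.shape)                           # (4, 16)
```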
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent ) def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . exp ( self . z_log_var ( latent ) / 2 ) def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . numpy () else : return covariance def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . 
Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. 
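latent_gaussian_noisy, reconstruction_forward and reconstruction_eval, documented just above, differ only in how the latent vector reaching the decoder is produced: during training the latent is sampled around the mean with the reparameterization trick z = mu + exp(log_var / 2) * (scale * eps), while at evaluation time the mean itself is decoded. A compact side-by-side sketch with toy stand-ins for the heads and decoder (all names and dimensions here are illustrative):

```python
import torch

torch.manual_seed(0)

latent_dim, scale = 16, 1e-3
z_mean = torch.nn.Linear(latent_dim, latent_dim)
z_log_var = torch.nn.Linear(latent_dim, latent_dim)
decoder = torch.nn.Linear(latent_dim, 32)      # toy stand-in for the reconstruction path

latent = torch.randn(4, latent_dim)            # pretend encoder/bottleneck output

# Training-style forward (reconstruction_forward): sample around the mean with the
# reparameterization trick, z = mu + exp(log_var / 2) * (scale * eps)
mu = z_mean(latent)
log_var = z_log_var(latent)
eps = scale * torch.randn_like(log_var)
z_noisy = mu + torch.exp(log_var / 2.0) * eps
x_train = decoder(z_noisy)

# Evaluation-style forward (reconstruction_eval / eval): decode the mean directly,
# with no sampling noise.
x_eval = decoder(mu)

print(x_train.shape, x_eval.shape)
```

Keeping the noise on the sampled path only is what makes eval and reconstruction_eval deterministic while reconstruction_forward remains stochastic.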
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy () CoVariance ( input_data = None , inv = False , to_numpy = False ) # Computes the covariance matrix of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the covariance matrix, by default None None inv bool If True, returns the inverse of the covariance matrix, by default False False to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . numpy () else : return covariance Mu ( input_data = None , to_numpy = False ) # Computes the mean of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the mean, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. 
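The CoVariance method above assembles a batch of diagonal matrices from the per-dimension scales returned by Sigma using torch.diag_embed (or from their reciprocals when inv is set). The call itself is a one-liner, illustrated below with arbitrary values; note that for a diagonal Gaussian the covariance matrix is conventionally diag(σ²), so which quantity belongs on the diagonal is worth checking against the intended convention.

```python
import torch

sigma = torch.tensor([[0.5, 1.0, 2.0]])       # per-dimension scales for a batch of one
cov = torch.diag_embed(sigma)                  # (1, 3, 3) matrix with sigma on the diagonal
cov_inv = torch.diag_embed(1.0 / sigma)        # diagonal matrix of the reciprocals
print(cov[0])
```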
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent ) Sigma ( input_data = None , to_numpy = False ) # Computes the standard deviation of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the standard deviation, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . exp ( self . z_log_var ( latent ) / 2 ) __init__ ( encoder = None , bottleneck_encoder = None , bottleneck_decoder = None , decoder = None , encoder_activation = 'relu' , input_dim = None , output_dim = None , latent_dim = None , activation = None , channels = None , kernel_size = None , case = None , architecture = None , use_batch_norm = False , shallow = False , scale = 0.001 , devices = 'cpu' , name = None , ** kwargs ) # Constructor method. Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. 
None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None encoder_activation str The activation function to use in the encoder. Defaults to \"relu\". 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimension of the data. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimension of the data. Defaults to None. None latent_dim Optional [ int ] The size of the bottleneck layer. Defaults to None. None activation Optional [ Union [ list , str ]] The activation function to use in the networks. Defaults to None. None channels Optional [ int ] The number of channels in the input data. Defaults to None. None kernel_size Optional [ int ] Convolutional kernel size. (Default value = None) None case Optional [ str ] The name of the autoencoder variant. Defaults to None. None architecture Optional [ str ] The architecture of the networks. Defaults to None. None use_batch_norm Optional [ bool ] (Default value = False) False shallow Optional [ bool ] Whether to use a shallow network architecture. Defaults to False. False scale float The scale of the initialization. Defaults to 1e-3. 0.001 devices Union [ str , list ] The device(s) to use for computation. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. 
bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. **kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . 
before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () eval ( input_data = None ) # Reconstructs the input data using the mean of the encoded data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy () latent_gaussian_noisy ( input_data = None ) # Generates a noisy latent representation of the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and generate a noisy latent representation, by default None None Returns: Type Description Tensor torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. 
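The noise injection performed by latent_gaussian_noisy follows the usual reparameterization form z = mu + exp(log_var / 2) * eps, with eps drawn from a standard Gaussian and multiplied by the scale factor set in the constructor (1e-3 by default). A standalone sketch of that computation, with illustrative tensor sizes:
>>> import torch
>>> mu = torch.zeros(1, 8)                   # latent mean (batch of 1, latent dimension 8)
>>> log_var = torch.zeros(1, 8)              # latent log-variance
>>> scale = 1e-3                             # default noise scale of AutoencoderVariational
>>> eps = scale * torch.randn_like(log_var)  # scaled standard Gaussian noise
>>> z = mu + torch.exp(log_var / 2.0) * eps  # noisy latent sample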
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps project ( input_data = None ) # Projects the input data onto the autoencoder's latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to project onto the autoencoder's latent space, by default None None Returns: Type Description ndarray np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy () reconstruct ( input_data = None ) # Reconstructs the input data using the trained autoencoder. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. 
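Since project returns the latent mean as a NumPy array and reconstruct decodes a latent array back to the data space, the two methods compose into a deterministic round trip (the constructor call below is illustrative, as in the other examples on this page):
>>> import numpy as np
>>> from simulai.models import AutoencoderVariational
>>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1))
>>> input_data = np.random.rand(1, 28, 28, 1)
>>> latent = autoencoder.project(input_data=input_data)    # latent mean, returned as np.ndarray
>>> restored = autoencoder.reconstruct(input_data=latent)  # decoded back to the data space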
Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () reconstruction_eval ( input_data = None ) # Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed reconstruction_forward ( input_data = None ) # Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . 
projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed summary ( input_data = None , input_shape = None , verbose = True , display = True ) # Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] Input data to pass through the encoder, by default None None input_shape list The shape of the input data if input_data is None, by default None None verbose bool (Default value = True) True display bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Type Description Exception If self.input_dim is not a tuple or an integer. AssertionError If input_shape is None when input_data is None. Note The summary method calls the summary method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . 
input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . 
shapes_dict }) else : print ( self )","title":"Simulai models autoencoder"},{"location":"simulai_models/simulai_models_autoencoder/#autoencoder","text":"","title":"AutoEncoder"},{"location":"simulai_models/simulai_models_autoencoder/#autoencodermlp","text":"Bases: NetworkTemplate This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: Fully-connected encoder Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 class AutoencoderMLP ( NetworkTemplate ): r \"\"\"This is an implementation of a Fully-connected AutoEncoder as Reduced Order Model; A MLP autoencoder architecture consists of two stages: - Fully-connected encoder - Fully connected decoder Graphical scheme: | | | | | | Z -> | | | | | -> Z_til | | | | | | ENCODER DECODER \"\"\" def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . 
weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict () def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary () def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . numpy ()","title":"AutoencoderMLP"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.__init__","text":"Initialize the AutoencoderMLP network Parameters: Name Type Description Default encoder DenseNetwork The encoder network architecture. (Default value = None) None decoder DenseNetwork The decoder network architecture. (Default value = None) None input_dim Optional [ int ] The input dimensions of the data, by default None. None output_dim Optional [ int ] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default \"cpu\". 'cpu' name str The name of the network, by default None. 
None Source code in simulai/models/_pytorch_models/_autoencoder.py 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 def __init__ ( self , encoder : DenseNetwork = None , decoder : DenseNetwork = None , input_dim : Optional [ int ] = None , output_dim : Optional [ int ] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Initialize the AutoencoderMLP network Args: encoder (DenseNetwork, optional): The encoder network architecture. (Default value = None) decoder (DenseNetwork, optional): The decoder network architecture. (Default value = None) input_dim (Optional[int], optional): The input dimensions of the data, by default None. output_dim (Optional[int], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default \"cpu\". name (str, optional): The name of the network, by default None. \"\"\" super ( AutoencoderMLP , self ) . __init__ ( name = name ) self . weights = list () # This option is used when no network is provided # and it uses default choices for the architectures if encoder == None and decoder == None : encoder , decoder = mlp_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , shallow = shallow , ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.eval_projection","text":"Evaluate the projection of the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 168 169 170 171 172 173 174 175 176 177 178 179 180 def eval_projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the projection of the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" return self . projection ( input_data = input_data ) . detach () . 
numpy ()","title":"eval_projection()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.forward","text":"Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.projection","text":"Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent","title":"projection()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.reconstruction","text":"Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed, by default None. None Returns: Type Description Tensor torch.Tensor: The dataset reconstructed. Source code in simulai/models/_pytorch_models/_autoencoder.py 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The dataset to be reconstructed, by default None. Returns: torch.Tensor: The dataset reconstructed. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed","title":"reconstruction()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderMLP.summary","text":"Prints the summary of the network architecture Source code in simulai/models/_pytorch_models/_autoencoder.py 114 115 116 117 def summary ( self ) -> None : \"\"\"Prints the summary of the network architecture\"\"\" self . encoder . summary () self . decoder . summary ()","title":"summary()"},{"location":"simulai_models/simulai_models_autoencoder/#autoencodercnn","text":"Bases: NetworkTemplate This is an implementation of a convolutional autoencoder as Reduced Order Model. 
An autoencoder architecture consists of three stages: The convolutional encoder The bottleneck stage, subdivided in: Fully-connected encoder Fully connected decoder The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 class AutoencoderCNN ( NetworkTemplate ): r \"\"\"This is an implementation of a convolutional autoencoder as Reduced Order Model. An autoencoder architecture consists of three stages: - The convolutional encoder - The bottleneck stage, subdivided in: - Fully-connected encoder - Fully connected decoder - The convolutional decoder Graphical scheme Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. 
latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . 
ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data ) def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. 
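A round-trip sketch for the convolutional variant built through the automatic generation pipeline; the argument values below (input layout, channels, kernel size and case) are assumptions chosen only for illustration, not a prescribed configuration. Note that project should be called before reconstruct, since the projection step records the pre-flatten shape that the reconstruction uses to reshape the bottleneck output.
>>> import numpy as np
>>> from simulai.models import AutoencoderCNN
>>> autoencoder = AutoencoderCNN(input_dim=(None, 1, 64, 64), latent_dim=8,
...                              activation=\"tanh\", channels=4, kernel_size=3, case=\"2d\")
>>> data = np.random.rand(10, 1, 64, 64)
>>> latent = autoencoder.project(input_data=data)          # latent array with latent_dim columns
>>> restored = autoencoder.reconstruct(input_data=latent)  # reshaped back to the input layout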
Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"AutoencoderCNN"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.__init__","text":"Initialize the AutoencoderCNN network. Parameters: Name Type Description Default encoder ConvolutionalNetwork The encoder network architecture, by default None. None bottleneck_encoder Linear The bottleneck encoder network architecture, by default None. None bottleneck_decoder Linear The bottleneck decoder network architecture, by default None. None decoder ConvolutionalNetwork The decoder network architecture, by default None. None encoder_activation str The activation function used by the encoder network, by default 'relu'. 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimensions of the data, by default None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions of the data, by default None. None latent_dim Optional [ int ] The dimensions of the latent space, by default None. None kernel_size Optional [ int ] (Default value = None) None activation Optional [ Union [ list , str ]] The activation functions used by the network, by default None. None channels Optional [ int ] The number of channels of the convolutional layers, by default None. None case Optional [ str ] The type of convolutional encoder and decoder to be used, by default None. None shallow Optional [ bool ] Whether the network should be shallow or not, by default False. False devices Union [ str , list ] The device(s) to be used for allocating subnetworks, by default 'cpu'. 'cpu' name str The name of the network, by default None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 def __init__ ( self , encoder : ConvolutionalNetwork = None , bottleneck_encoder : Linear = None , bottleneck_decoder : Linear = None , decoder : ConvolutionalNetwork = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , kernel_size : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , shallow : Optional [ bool ] = False , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : \"\"\"Initialize the AutoencoderCNN network. 
Args: encoder (ConvolutionalNetwork, optional): The encoder network architecture, by default None. bottleneck_encoder (Linear, optional): The bottleneck encoder network architecture, by default None. bottleneck_decoder (Linear, optional): The bottleneck decoder network architecture, by default None. decoder (ConvolutionalNetwork, optional): The decoder network architecture, by default None. encoder_activation (str, optional): The activation function used by the encoder network, by default 'relu'. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions of the data, by default None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions of the data, by default None. latent_dim (Optional[int], optional): The dimensions of the latent space, by default None. kernel_size (Optional[int], optional): (Default value = None) activation (Optional[Union[list, str]], optional): The activation functions used by the network, by default None. channels (Optional[int], optional): The number of channels of the convolutional layers, by default None. case (Optional[str], optional): The type of convolutional encoder and decoder to be used, by default None. shallow (Optional[bool], optional): Whether the network should be shallow or not, by default False. devices (Union[str, list], optional): The device(s) to be used for allocating subnetworks, by default 'cpu'. name (str, optional): The name of the network, by default None. **kwargs \"\"\" super ( AutoencoderCNN , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim ( encoder , decoder , bottleneck_encoder , bottleneck_decoder , ) = cnn_autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , kernel_size = kernel_size , channels = channels , case = case , shallow = shallow , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . decoder = self . to_wrap ( entity = decoder , device = self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . weights += self . decoder . weights self . last_encoder_channels = None self . before_flatten_dimension = None self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.eval","text":"Evaluate the autoencoder on the given dataset. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be evaluated, by default None. 
None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Evaluate the autoencoder on the given dataset. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be evaluated, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return super () . eval ( input_data = input_data )","title":"eval()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.forward","text":"Execute the complete projection/reconstruction pipeline. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 def forward ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Execute the complete projection/reconstruction pipeline. Args: input_data (Union[np.ndarray, torch.Tensor]): The input dataset. Returns: torch.Tensor: The reconstructed dataset. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.project","text":"Project the input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected, by default None. None Returns: Type Description ndarray np.ndarray: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 461 462 463 464 465 466 467 468 469 470 471 472 473 474 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Project the input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be projected, by default None. Returns: np.ndarray: The dataset projected over the latent space. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy ()","title":"project()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.projection","text":"Project input dataset into the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be projected. required Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 @as_tensor def projection ( self , input_data : Union [ np . ndarray , torch . Tensor ]) -> torch . Tensor : \"\"\"Project input dataset into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor]): The dataset to be projected. Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" btnk_input = self . encoder . 
forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent","title":"projection()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.reconstruct","text":"Reconstructs the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The dataset to be reconstructed. If not provided, uses the original input data, by default None. None Returns: Type Description ndarray np.ndarray: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 476 477 478 479 480 481 482 483 484 485 486 487 488 489 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the latent dataset to the original one. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The dataset to be reconstructed. If not provided, uses the original input data, by default None. Returns: np.ndarray: The reconstructed dataset. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"reconstruct()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.reconstruction","text":"Reconstruct the latent dataset to the original one. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The dataset to be reconstructed. required Returns: Type Description Tensor torch.Tensor: The reconstructed dataset. Source code in simulai/models/_pytorch_models/_autoencoder.py 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 @as_tensor def reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] ) -> torch . Tensor : \"\"\"Reconstruct the latent dataset to the original one. Args: input_data (Union[torch.Tensor, np.ndarray]): The dataset to be reconstructed. Returns: torch.Tensor: The reconstructed dataset. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed","title":"reconstruction()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderCNN.summary","text":"Prints the summary of the network architecture. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input dataset. (Default value = None) None input_shape list The shape of the input data. (Default value = None) None verbose bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The dataset projected over the latent space. Source code in simulai/models/_pytorch_models/_autoencoder.py 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . 
Tensor : \"\"\"Prints the summary of the network architecture. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input dataset. (Default value = None) input_shape (list, optional): The shape of the input data. (Default value = None) verbose (bool, optional): (Default value = True) Returns: torch.Tensor: The dataset projected over the latent space. \"\"\" if verbose == True : if self . input_dim != None : input_shape = self . input_dim else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self )","title":"summary()"},{"location":"simulai_models/simulai_models_autoencoder/#autoencoderkoopman","text":"Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Koopman operator Fully connected decoder The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... 
[Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 class AutoencoderKoopman ( NetworkTemplate ): r \"\"\"This is an implementation of a Koopman autoencoder as a Reduced Order Model. A Koopman autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Koopman operator - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme (Koopman OPERATOR) ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. 
bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . 
add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , ) -> torch . Tensor : if verbose == True : if self . input_dim != None : input_shape = list ( self . input_dim ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape = self . encoder . input_size input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary () print ( f \"The Koopman Operator has shape: { self . K_op . shape } \" ) self . bottleneck_decoder . summary () bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) self . decoder . summary ( input_data = bottleneck_output , device = self . device ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the bottleneck encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . 
prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Computes the projection of the input data onto the encoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The projected latent representation. \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the bottleneck decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = input_data ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Reconstructs the input data using the decoder. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m )) def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T ) def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . 
reconstruction ( input_data = latent_m ) return reconstructed_m def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy () def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"AutoencoderKoopman"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.__init__","text":"Constructs a new instance of the Autoencoder Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None input_dim Optional [ Tuple [ int , ...]] The input dimensions. Used for automatic network generation. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimensions. Used for automatic network generation. Defaults to None. None latent_dim Optional [ int ] The latent dimensions. Used for automatic network generation. Defaults to None. None activation Optional [ Union [ list , str ]] The activation functions for each layer. Used for automatic network generation. Defaults to None. None channels Optional [ int ] The number of channels. Used for automatic network generation. Defaults to None. None case Optional [ str ] The type of problem. Used for automatic network generation. Defaults to None. None architecture Optional [ str ] The network architecture. Used for automatic network generation. Defaults to None. None shallow Optional [ bool ] Whether to use shallow or deep network. 
Used for automatic network generation. Defaults to False. False use_batch_norm Optional [ bool ] (Default value = False) False encoder_activation str The activation function for the encoder. Defaults to \"relu\". 'relu' devices Union [ str , list ] The devices to use. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , shallow : Optional [ bool ] = False , use_batch_norm : Optional [ bool ] = False , encoder_activation : str = \"relu\" , devices : Union [ str , list ] = \"cpu\" , name : str = None , ) -> None : \"\"\"Constructs a new instance of the Autoencoder Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. input_dim (Optional[Tuple[int, ...]], optional): The input dimensions. Used for automatic network generation. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimensions. Used for automatic network generation. Defaults to None. latent_dim (Optional[int], optional): The latent dimensions. Used for automatic network generation. Defaults to None. activation (Optional[Union[list, str]], optional): The activation functions for each layer. Used for automatic network generation. Defaults to None. channels (Optional[int], optional): The number of channels. Used for automatic network generation. Defaults to None. case (Optional[str], optional): The type of problem. Used for automatic network generation. Defaults to None. architecture (Optional[str], optional): The network architecture. Used for automatic network generation. Defaults to None. shallow (Optional[bool], optional): Whether to use shallow or deep network. Used for automatic network generation. Defaults to False. use_batch_norm (Optional[bool], optional): (Default value = False) encoder_activation (str, optional): The activation function for the encoder. Defaults to \"relu\". devices (Union[str, list], optional): The devices to use. Defaults to \"cpu\". 
name (str, optional): The name of the autoencoder. Defaults to None. \"\"\" super ( AutoencoderKoopman , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , ) self . encoder = encoder . to ( self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights if bottleneck_encoder is not None and bottleneck_decoder is not None : self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . K_op = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension , bias = False ) . weight , device = self . device , ) self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.latent_forward","text":"Evaluates the operation u^{u+1} = K u^{i} Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description Tensor torch.Tensor: The computed latent representation. 
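In practice, this single-step operator application is the building block that reconstruction_forward, reconstruction_forward_m and predict chain together with the encoder and decoder. A minimal, hedged sketch of that workflow (the constructor arguments and array shapes below are purely illustrative, not a verified configuration):
>>> import numpy as np
>>> from simulai.models import AutoencoderKoopman
>>> model = AutoencoderKoopman(input_dim=(None, 1, 16, 16), latent_dim=8,
...                            activation=\"tanh\", channels=2, case=\"2d\",
...                            architecture=\"cnn\")  # illustrative arguments only
>>> U = np.random.rand(10, 1, 16, 16).astype(\"float32\")
>>> U_til = model.reconstruction_forward(input_data=U)        # plain encode-decode, D(E(U))
>>> U_m = model.reconstruction_forward_m(input_data=U, m=3)   # decode after three latent steps, D(K^3 E(U))
>>> rollout = model.predict(input_data=U[:1], n_steps=5)      # 5-step extrapolation in the latent space, returned as np.ndarray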
Source code in simulai/models/_pytorch_models/_autoencoder.py 820 821 822 823 824 825 826 827 828 829 830 831 832 def latent_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation u^{u+1} = K u^{i} Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , self . K_op . T )","title":"latent_forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.latent_forward_m","text":"Evaluates the operation $u^{u+m} = K^m u^{i}$ Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The computed latent representation. Source code in simulai/models/_pytorch_models/_autoencoder.py 805 806 807 808 809 810 811 812 813 814 815 816 817 818 def latent_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation $u^{u+m} = K^m u^{i}$ Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The computed latent representation. \"\"\" return torch . matmul ( input_data , torch . pow ( self . K_op . T , m ))","title":"latent_forward_m()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.predict","text":"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None n_steps int The number of extrapolations to perform. Defaults to 1. 1 Returns: Type Description ndarray np.ndarray: The predicted reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 def predict ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , n_steps : int = 1 ) -> np . ndarray : \"\"\"Predicts the reconstructed data for the input data after n_steps extrapolation in the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. n_steps (int, optional): The number of extrapolations to perform. Defaults to 1. Returns: np.ndarray: The predicted reconstructed data. \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) predictions = list () latent = self . projection ( input_data = input_data ) init_latent = latent # Extrapolating in the latent space over n_steps steps for s in range ( n_steps ): latent_s = self . latent_forward ( input_data = init_latent ) init_latent = latent_s predictions . append ( latent_s ) predictions = torch . vstack ( predictions ) reconstructed_predictions = self . reconstruction ( input_data = predictions ) return reconstructed_predictions . detach () . numpy ()","title":"predict()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.project","text":"Projects the input data into the latent space. 
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The projected data. Source code in simulai/models/_pytorch_models/_autoencoder.py 902 903 904 905 906 907 908 909 910 911 912 913 914 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Projects the input data into the latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The projected data. \"\"\" projected_data = self . projection ( input_data = input_data ) return projected_data . cpu () . detach () . numpy ()","title":"project()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.reconstruct","text":"Reconstructs the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description ndarray np.ndarray: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"Reconstructs the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: np.ndarray: The reconstructed data. \"\"\" reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"reconstruct()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.reconstruction_forward","text":"Evaluates the operation \u0168 = D(E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168 = D(E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. Returns: torch.Tensor: The reconstructed data. \"\"\" latent = self . projection ( input_data = input_data ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"reconstruction_forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderKoopman.reconstruction_forward_m","text":"Evaluates the operation \u0168_m = D(K^m E(U)) Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data. Defaults to None. None m int The number of Koopman iterations. Defaults to 1. 1 Returns: Type Description Tensor torch.Tensor: The reconstructed data. Source code in simulai/models/_pytorch_models/_autoencoder.py 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 def reconstruction_forward_m ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , m : int = 1 ) -> torch . Tensor : \"\"\"Evaluates the operation \u0168_m = D(K^m E(U)) Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data. Defaults to None. m (int, optional): The number of Koopman iterations. Defaults to 1. Returns: torch.Tensor: The reconstructed data. 
\"\"\" latent = self . projection ( input_data = input_data ) latent_m = self . latent_forward_m ( input_data = latent , m = m ) reconstructed_m = self . reconstruction ( input_data = latent_m ) return reconstructed_m","title":"reconstruction_forward_m()"},{"location":"simulai_models/simulai_models_autoencoder/#autoencodervariational","text":"Bases: NetworkTemplate This is an implementation of a Koopman autoencoder as a reduced order model. A variational autoencoder architecture consists of five stages: The convolutional encoder [Optional] Fully-connected encoder Gaussian noise Fully connected decoder The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER Source code in simulai/models/_pytorch_models/_autoencoder.py 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 1455 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 
1476 1477 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 1500 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 1525 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 class AutoencoderVariational ( NetworkTemplate ): r \"\"\"This is an implementation of a Koopman autoencoder as a reduced order model. A variational autoencoder architecture consists of five stages: - The convolutional encoder [Optional] - Fully-connected encoder - Gaussian noise - Fully connected decoder - The convolutional decoder [Optional] Graphical scheme Gaussian noise ^ | | | | | | | | Z -> [Conv] -> [Conv] -> ... [Conv] -> | | | - | | | -> [Conv.T] -> [Conv.T] -> ... [Conv.T] -> Z_til | | | | | | ENCODER DENSE BOTTLENECK DECODER \"\"\" def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... ]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. 
**kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict () def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. 
Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . shapes_dict }) else : print ( self ) @as_tensor def _projection_with_bottleneck ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . 
Tensor : r \"\"\"Applies the encoder and bottleneck encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the bottleneck encoder applied to the input data. Note: This function is used for projection of the input data into the bottleneck space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection_with_bottleneck(input_data=input_data) \"\"\" btnk_input = self . encoder . forward ( input_data = input_data ) self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( self . before_flatten_dimension ))) latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) return latent @as_tensor def _projection ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder to input data and returns the output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the encoder, by default None Returns: torch.Tensor: The output of the encoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._projection(input_data=input_data) \"\"\" latent = self . encoder . forward ( input_data = input_data ) return latent @as_tensor def _reconstruction_with_bottleneck ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the bottleneck decoder and decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the bottleneck decoder and decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the bottleneck decoder's output. Note: This function is used for reconstruction of the input data from the bottleneck space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> bottleneck_output = autoencoder._projection_with_bottleneck(input_data=input_data) >>> output_data = autoencoder._reconstruction_with_bottleneck(input_data=bottleneck_output) \"\"\" bottleneck_output = self . encoder_activation ( ( self . bottleneck_decoder . forward ( input_data = input_data )) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 ,) + self . before_flatten_dimension ) reconstructed = self . decoder . forward ( input_data = bottleneck_output ) return reconstructed @as_tensor def _reconstruction ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : r \"\"\"Applies the decoder to input data and returns the output. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input data to pass through the decoder, by default None Returns: torch.Tensor: The output of the decoder applied to the input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder._reconstruction(input_data=input_data) \"\"\" reconstructed = self . decoder . forward ( input_data = input_data ) return reconstructed def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . 
Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent ) def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . exp ( self . z_log_var ( latent ) / 2 ) def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . numpy () else : return covariance def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. 
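For reference, the sampling performed by latent_gaussian_noisy is the usual reparameterization trick: a latent sample is drawn as z = mu + exp(log_var / 2) * eps, with eps being Gaussian noise scaled by the model's scale attribute. The snippet below is a minimal standalone sketch of that step in plain PyTorch; the tensor shapes and the scale value are illustrative assumptions, not part of the SimulAI API.

import torch

# Illustrative latent statistics, as produced by the z_mean and z_log_var layers
# (the batch size and latent dimension are assumptions for this sketch).
mu = torch.zeros(4, 8)
log_var = torch.zeros(4, 8)
scale = 1e-3  # mirrors the default value of the `scale` constructor argument

# Reparameterization: z = mu + sigma * eps, with sigma = exp(log_var / 2)
eps = scale * torch.randn_like(log_var)
z = mu + torch.exp(log_var / 2.0) * eps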
Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy () def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. 
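Taken together, project, reconstruct and eval give a NumPy-in/NumPy-out path through the latent space: project returns the latent mean, reconstruct decodes a latent array, and eval chains both deterministically. The sketch below reuses the shape convention of the docstring examples; in practice the automatic generation pipeline usually needs further constructor arguments (latent_dim, architecture, case, ...), so this is an assumed minimal configuration rather than a verified one.

import numpy as np
from simulai.models import AutoencoderVariational

autoencoder = AutoencoderVariational(input_dim=(28, 28, 1))
data = np.random.rand(4, 28, 28, 1)

latent = autoencoder.project(input_data=data)          # latent mean, returned as np.ndarray
restored = autoencoder.reconstruct(input_data=latent)  # decode a latent array back to data space
deterministic = autoencoder.eval(input_data=data)      # encode with the mean, then decode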
Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy () def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy ()","title":"AutoencoderVariational"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.CoVariance","text":"Computes the covariance matrix of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the covariance matrix, by default None None inv bool If True, returns the inverse of the covariance matrix, by default False False to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 def CoVariance ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , inv : bool = False , to_numpy : bool = False , ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the covariance matrix of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the covariance matrix, by default None inv (bool, optional): If True, returns the inverse of the covariance matrix, by default False to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The covariance matrix (or its inverse) of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> covariance = autoencoder.CoVariance(input_data=input_data) \"\"\" if inv == False : Sigma_inv = 1 / self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma_inv ) else : Sigma = self . Sigma ( input_data = input_data ) covariance = torch . diag_embed ( Sigma ) if to_numpy == True : return covariance . detach () . 
numpy () else : return covariance","title":"CoVariance()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.Mu","text":"Computes the mean of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the mean, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 def Mu ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the mean of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the mean, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The mean of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> mu = autoencoder.Mu(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return self . z_mean ( latent ) . detach () . numpy () else : return self . z_mean ( latent )","title":"Mu()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.Sigma","text":"Computes the standard deviation of the encoded input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and compute the standard deviation, by default None None to_numpy bool If True, returns the result as a NumPy array, by default False False Returns: Type Description Union [ ndarray , Tensor ] Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 def Sigma ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , to_numpy : bool = False ) -> Union [ np . ndarray , torch . Tensor ]: r \"\"\"Computes the standard deviation of the encoded input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and compute the standard deviation, by default None to_numpy (bool, optional): If True, returns the result as a NumPy array, by default False Returns: Union[np.ndarray, torch.Tensor]: The standard deviation of the encoded input data. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> sigma = autoencoder.Sigma(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) if to_numpy == True : return torch . exp ( self . z_log_var ( latent ) / 2 ) . detach () . numpy () else : return torch . 
exp ( self . z_log_var ( latent ) / 2 )","title":"Sigma()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.__init__","text":"Constructor method. Parameters: Name Type Description Default encoder Union [ ConvolutionalNetwork , DenseNetwork ] The encoder network. Defaults to None. None bottleneck_encoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck encoder network. Defaults to None. None bottleneck_decoder Optional [ Union [ Linear , DenseNetwork ]] The bottleneck decoder network. Defaults to None. None decoder Union [ ConvolutionalNetwork , DenseNetwork ] The decoder network. Defaults to None. None encoder_activation str The activation function to use in the encoder. Defaults to \"relu\". 'relu' input_dim Optional [ Tuple [ int , ...]] The input dimension of the data. Defaults to None. None output_dim Optional [ Tuple [ int , ...]] The output dimension of the data. Defaults to None. None latent_dim Optional [ int ] The size of the bottleneck layer. Defaults to None. None activation Optional [ Union [ list , str ]] The activation function to use in the networks. Defaults to None. None channels Optional [ int ] The number of channels in the input data. Defaults to None. None kernel_size Optional [ int ] Convolutional kernel size. (Default value = None) None case Optional [ str ] The name of the autoencoder variant. Defaults to None. None architecture Optional [ str ] The architecture of the networks. Defaults to None. None use_batch_norm Optional [ bool ] (Default value = False) False shallow Optional [ bool ] Whether to use a shallow network architecture. Defaults to False. False scale float The scale of the initialization. Defaults to 1e-3. 0.001 devices Union [ str , list ] The device(s) to use for computation. Defaults to \"cpu\". 'cpu' name str The name of the autoencoder. Defaults to None. None Source code in simulai/models/_pytorch_models/_autoencoder.py 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 def __init__ ( self , encoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , bottleneck_encoder : Optional [ Union [ Linear , DenseNetwork ]] = None , bottleneck_decoder : Optional [ Union [ Linear , DenseNetwork ]] = None , decoder : Union [ ConvolutionalNetwork , DenseNetwork ] = None , encoder_activation : str = \"relu\" , input_dim : Optional [ Tuple [ int , ... ]] = None , output_dim : Optional [ Tuple [ int , ... 
]] = None , latent_dim : Optional [ int ] = None , activation : Optional [ Union [ list , str ]] = None , channels : Optional [ int ] = None , kernel_size : Optional [ int ] = None , case : Optional [ str ] = None , architecture : Optional [ str ] = None , use_batch_norm : Optional [ bool ] = False , shallow : Optional [ bool ] = False , scale : float = 1e-3 , devices : Union [ str , list ] = \"cpu\" , name : str = None , ** kwargs , ) -> None : r \"\"\"Constructor method. Args: encoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The encoder network. Defaults to None. bottleneck_encoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck encoder network. Defaults to None. bottleneck_decoder (Optional[Union[Linear, DenseNetwork]], optional): The bottleneck decoder network. Defaults to None. decoder (Union[ConvolutionalNetwork, DenseNetwork], optional): The decoder network. Defaults to None. encoder_activation (str, optional): The activation function to use in the encoder. Defaults to \"relu\". input_dim (Optional[Tuple[int, ...]], optional): The input dimension of the data. Defaults to None. output_dim (Optional[Tuple[int, ...]], optional): The output dimension of the data. Defaults to None. latent_dim (Optional[int], optional): The size of the bottleneck layer. Defaults to None. activation (Optional[Union[list, str]], optional): The activation function to use in the networks. Defaults to None. channels (Optional[int], optional): The number of channels in the input data. Defaults to None. kernel_size (Optional[int], optional): Convolutional kernel size. (Default value = None) case (Optional[str], optional): The name of the autoencoder variant. Defaults to None. architecture (Optional[str], optional): The architecture of the networks. Defaults to None. use_batch_norm (Optional[bool], optional): (Default value = False) shallow (Optional[bool], optional): Whether to use a shallow network architecture. Defaults to False. scale (float, optional): The scale of the initialization. Defaults to 1e-3. devices (Union[str, list], optional): The device(s) to use for computation. Defaults to \"cpu\". name (str, optional): The name of the autoencoder. Defaults to None. **kwargs \"\"\" super ( AutoencoderVariational , self ) . __init__ ( name = name ) self . weights = list () # Determining the kind of device to be used for allocating the # subnetworks self . device = self . _set_device ( devices = devices ) self . input_dim = None # If not network is provided, the automatic generation # pipeline is activated. if all ( [ isn == None for isn in [ encoder , decoder , bottleneck_encoder , bottleneck_decoder ] ] ): self . input_dim = input_dim encoder , decoder , bottleneck_encoder , bottleneck_decoder = autoencoder_auto ( input_dim = input_dim , latent_dim = latent_dim , output_dim = output_dim , activation = activation , channels = channels , kernel_size = kernel_size , architecture = architecture , case = case , shallow = shallow , use_batch_norm = use_batch_norm , name = self . name , ** kwargs , ) self . encoder = self . to_wrap ( entity = encoder , device = self . device ) self . decoder = decoder . to ( self . device ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) self . weights += self . encoder . weights self . weights += self . decoder . weights self . there_is_bottleneck = False # These subnetworks are optional if bottleneck_encoder is not None and bottleneck_decoder is not None : self . bottleneck_encoder = self . 
to_wrap ( entity = bottleneck_encoder , device = self . device ) self . bottleneck_decoder = self . to_wrap ( entity = bottleneck_decoder , device = self . device ) self . add_module ( \"bottleneck_encoder\" , self . bottleneck_encoder ) self . add_module ( \"bottleneck_decoder\" , self . bottleneck_decoder ) self . weights += self . bottleneck_encoder . weights self . weights += self . bottleneck_decoder . weights self . projection = self . _projection_with_bottleneck self . reconstruction = self . _reconstruction_with_bottleneck self . there_is_bottleneck = True else : self . projection = self . _projection self . reconstruction = self . _reconstruction self . last_encoder_channels = None self . before_flatten_dimension = None self . latent_dimension = None if bottleneck_encoder is not None : self . latent_dimension = bottleneck_encoder . output_size else : self . latent_dimension = self . encoder . output_size self . z_mean = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . z_log_var = self . to_wrap ( entity = torch . nn . Linear ( self . latent_dimension , self . latent_dimension ), device = self . device , ) self . add_module ( \"z_mean\" , self . z_mean ) self . add_module ( \"z_log_var\" , self . z_log_var ) self . weights += [ self . z_mean . weight ] self . weights += [ self . z_log_var . weight ] self . mu = None self . log_v = None self . scale = scale self . encoder_activation = self . _get_operation ( operation = encoder_activation ) self . shapes_dict = dict ()","title":"__init__()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.eval","text":"Reconstructs the input data using the mean of the encoded data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1526 1527 1528 1529 1530 1531 1532 1533 1534 1535 1536 1537 1538 1539 1540 1541 1542 1543 1544 1545 1546 def eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the mean of the encoded data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.eval(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) return self . reconstruction_eval ( input_data = input_data ) . cpu () . detach () . numpy ()","title":"eval()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.latent_gaussian_noisy","text":"Generates a noisy latent representation of the input data. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to encode and generate a noisy latent representation, by default None None Returns: Type Description Tensor torch.Tensor: A noisy latent representation of the input data. 
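Relatedly, the Mu, Sigma and CoVariance methods documented above expose the statistics of that latent Gaussian directly, which is convenient for inspecting the learned distribution outside the training loop. A minimal sketch, reusing the docstring's shape convention and assuming the same minimal configuration as the previous examples:

import numpy as np
from simulai.models import AutoencoderVariational

autoencoder = AutoencoderVariational(input_dim=(28, 28, 1))
input_data = np.random.rand(1, 28, 28, 1)

mu = autoencoder.Mu(input_data=input_data, to_numpy=True)           # latent mean
sigma = autoencoder.Sigma(input_data=input_data, to_numpy=True)     # latent standard deviation
cov = autoencoder.CoVariance(input_data=input_data, to_numpy=True)  # diagonal covariance matrix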
Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 def latent_gaussian_noisy ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Generates a noisy latent representation of the input data. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to encode and generate a noisy latent representation, by default None Returns: torch.Tensor: A noisy latent representation of the input data. Note: This function adds Gaussian noise to the mean and standard deviation of the encoded input data to generate a noisy latent representation. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> noisy_latent = autoencoder.latent_gaussian_noisy(input_data=input_data) \"\"\" self . mu = self . z_mean ( input_data ) self . log_v = self . z_log_var ( input_data ) eps = self . scale * torch . autograd . Variable ( torch . randn ( * self . log_v . size ()) ) . type_as ( self . log_v ) return self . mu + torch . exp ( self . log_v / 2.0 ) * eps","title":"latent_gaussian_noisy()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.project","text":"Projects the input data onto the autoencoder's latent space. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to project onto the autoencoder's latent space, by default None None Returns: Type Description ndarray np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1478 1479 1480 1481 1482 1483 1484 1485 1486 1487 1488 1489 1490 1491 1492 1493 1494 1495 1496 1497 1498 1499 def project ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Projects the input data onto the autoencoder's latent space. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to project onto the autoencoder's latent space, by default None Returns: np.ndarray: The input data projected onto the autoencoder's latent space. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> projected_data = autoencoder.project(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) projected_data_latent = self . Mu ( input_data = input_data ) return projected_data_latent . cpu () . detach () . numpy ()","title":"project()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.reconstruct","text":"Reconstructs the input data using the trained autoencoder. 
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to reconstruct, by default None None Returns: Type Description ndarray np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1501 1502 1503 1504 1505 1506 1507 1508 1509 1510 1511 1512 1513 1514 1515 1516 1517 1518 1519 1520 1521 1522 1523 1524 def reconstruct ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : r \"\"\"Reconstructs the input data using the trained autoencoder. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to reconstruct, by default None Returns: np.ndarray: The reconstructed data. Example: >>> autoencoder = Autoencoder(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruct(input_data=input_data) \"\"\" if isinstance ( input_data , np . ndarray ): input_data = torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) input_data = input_data . to ( self . device ) reconstructed_data = self . reconstruction ( input_data = input_data ) return reconstructed_data . cpu () . detach () . numpy ()","title":"reconstruct()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.reconstruction_eval","text":"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1456 1457 1458 1459 1460 1461 1462 1463 1464 1465 1466 1467 1468 1469 1470 1471 1472 1473 1474 1475 1476 def reconstruction_eval ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, computes the mean of the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_eval(input_data=input_data) \"\"\" encoder_output = self . projection ( input_data = input_data ) latent = self . z_mean ( encoder_output ) reconstructed = self . reconstruction ( input_data = latent ) return reconstructed","title":"reconstruction_eval()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.reconstruction_forward","text":"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. 
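In other words, reconstruction_forward is the stochastic path used during training (it samples the latent variable through latent_gaussian_noisy), while reconstruction_eval, documented above, is its deterministic counterpart that decodes the latent mean. A short sketch of the distinction, under the same assumed minimal configuration as the earlier examples:

import numpy as np
from simulai.models import AutoencoderVariational

autoencoder = AutoencoderVariational(input_dim=(28, 28, 1))
input_data = np.random.rand(1, 28, 28, 1)

# Stochastic: repeated calls generally differ, since a fresh latent sample is drawn each time.
sample_a = autoencoder.reconstruction_forward(input_data=input_data)
sample_b = autoencoder.reconstruction_forward(input_data=input_data)

# Deterministic: always decodes the latent mean, so repeated calls agree.
mean_rec = autoencoder.reconstruction_eval(input_data=input_data)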
Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] The input data to pass through the autoencoder, by default None None Returns: Type Description Tensor torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449 1450 1451 1452 1453 1454 def reconstruction_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> torch . Tensor : r \"\"\"Applies the encoder, adds Gaussian noise to the encoded data, and then applies the decoder to generate a reconstructed output. Args: input_data (Union[np.ndarray, torch.Tensor], optional): The input data to pass through the autoencoder, by default None Returns: torch.Tensor: The reconstructed output of the autoencoder. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> reconstructed_data = autoencoder.reconstruction_forward(input_data=input_data) \"\"\" latent = self . projection ( input_data = input_data ) latent_noisy = self . latent_gaussian_noisy ( input_data = latent ) reconstructed = self . reconstruction ( input_data = latent_noisy ) return reconstructed","title":"reconstruction_forward()"},{"location":"simulai_models/simulai_models_autoencoder/#simulai.models.AutoencoderVariational.summary","text":"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Parameters: Name Type Description Default input_data Union [ ndarray , Tensor ] Input data to pass through the encoder, by default None None input_shape list The shape of the input data if input_data is None, by default None None verbose bool (Default value = True) True display bool (Default value = True) True Returns: Type Description Tensor torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Type Description Exception If self.input_dim is not a tuple or an integer. AssertionError If input_shape is None when input_data is None. Note The summary method calls the summary method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) Source code in simulai/models/_pytorch_models/_autoencoder.py 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1170 1171 1172 1173 1174 1175 1176 1177 1178 1179 1180 1181 1182 1183 1184 1185 1186 1187 1188 1189 1190 1191 1192 1193 1194 1195 1196 1197 1198 1199 1200 1201 1202 1203 1204 1205 1206 1207 1208 1209 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 def summary ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , input_shape : list = None , verbose : bool = True , display : bool = True , ) -> torch . 
Tensor : r \"\"\"Summarizes the overall architecture of the autoencoder and saves the content of the subnetworks to a dictionary. Args: input_data (Union[np.ndarray, torch.Tensor], optional): Input data to pass through the encoder, by default None input_shape (list, optional): The shape of the input data if input_data is None, by default None verbose (bool, optional): (Default value = True) display (bool, optional): (Default value = True) Returns: torch.Tensor: The output of the autoencoder's decoder applied to the input data. Raises: Exception: If self.input_dim is not a tuple or an integer. AssertionError: If input_shape is None when input_data is None. Note: The summary method calls the `summary` method of each of the subnetworks and saves the content of the subnetworks to the overall architecture dictionary. If there is a bottleneck network, it is also summarized and saved to the architecture dictionary. Example: >>> autoencoder = AutoencoderVariational(input_dim=(28, 28, 1)) >>> input_data = np.random.rand(1, 28, 28, 1) >>> output_data = autoencoder.summary(input_data=input_data) \"\"\" if verbose == True : if self . input_dim != None : if type ( self . input_dim ) == tuple : input_shape = list ( self . input_dim ) elif type ( self . input_dim ) == int : input_shape = [ None , self . input_dim ] else : raise Exception ( f \"input_dim is expected to be tuple or int, but received { type ( self . input_dim ) } \" ) else : pass self . encoder . summary ( input_data = input_data , input_shape = input_shape , device = self . device , display = display , ) if type ( self . encoder . output_size ) == tuple : self . before_flatten_dimension = tuple ( self . encoder . output_size [ 1 :]) input_shape = self . encoder . input_size elif type ( self . encoder . output_size ) == int : input_shape = [ None , self . encoder . input_size ] else : pass if isinstance ( input_data , np . ndarray ): btnk_input = self . encoder . forward ( input_data = input_data ) else : assert ( input_shape ), \"It is necessary to have input_shape when input_data is None.\" input_shape [ 0 ] = 1 input_data = self . to_wrap ( entity = torch . ones ( input_shape ), device = self . device ) btnk_input = self . encoder . forward ( input_data = input_data ) before_flatten_dimension = tuple ( btnk_input . shape [ 1 :]) btnk_input = btnk_input . reshape (( - 1 , np . prod ( btnk_input . shape [ 1 :]))) # Bottleneck networks is are optional if self . there_is_bottleneck : latent = self . bottleneck_encoder . forward ( input_data = btnk_input ) self . bottleneck_encoder . summary ( display = display ) self . bottleneck_decoder . summary ( display = display ) bottleneck_output = self . encoder_activation ( self . bottleneck_decoder . forward ( input_data = latent ) ) bottleneck_output = bottleneck_output . reshape ( ( - 1 , * before_flatten_dimension ) ) else : bottleneck_output = btnk_input self . decoder . summary ( input_data = bottleneck_output , device = self . device , display = display ) # Saving the content of the subnetworks to the overall architecture dictionary self . shapes_dict . update ({ \"encoder\" : self . encoder . shapes_dict }) # Bottleneck networks is are optional if self . there_is_bottleneck : self . shapes_dict . update ( { \"bottleneck_encoder\" : self . bottleneck_encoder . shapes_dict } ) self . shapes_dict . update ( { \"bottleneck_decoder\" : self . bottleneck_decoder . shapes_dict } ) self . shapes_dict . update ({ \"decoder\" : self . decoder . 
shapes_dict }) else : print ( self )","title":"summary()"},{"location":"simulai_models/simulai_models_deeponet/","text":"red { color: red } DeepONets # DeepONet # Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_deeponet.py 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 class DeepONet ( NetworkTemplate ): name = \"deeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . 
__init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . _wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . 
input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ] def _latent_dimension_is_correct ( self , dim : Union [ int , tuple ]) -> bool : \"\"\"It checks if the latent dimension is consistent. Args: dim (Union[int, tuple]): Latent_space_dimension. Returns: bool: The confirmation about the dimensionality correctness. \"\"\" if type ( dim ) == int : return True elif type ( dim ) == tuple : if len ( tuple ) == 1 : return True else : return False def _bias_compatibility_is_correct ( self , dim_trunk : Union [ int , tuple ], dim_branch : Union [ int , tuple ] ) -> bool : assert dim_branch == dim_trunk + self . var_dim , ( \"When using bias, the dimension\" + \"of the branch output should be\" + \"trunk output + var_dim.\" ) def _forward_dense ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a matrix-like product, it means, the trunk network outputs serve as \"interpolation basis\" for the branch outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_branch . shape [ - 1 ] / self . var_dim ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , self . var_dim , latent_dim ) ) output = torch . matmul ( output_branch_reshaped , output_trunk [ ... , None ]) output = torch . squeeze ( output ) return output def _forward_pointwise ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product, after that a reshaping is applied in order to produce multiple outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_trunk . shape [ - 1 ] / self . var_dim ) output_trunk_reshaped = torch . reshape ( output_trunk , ( - 1 , latent_dim , self . var_dim ) ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , latent_dim , self . var_dim ) ) output = torch . sum ( output_trunk_reshaped * output_branch_reshaped , dim =- 2 , keepdim = False ) return output def _forward_vanilla ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = torch . 
sum ( output_trunk * output_branch , dim =- 1 , keepdim = True ) return output def _forward_selector_ ( self ) -> callable : \"\"\"It selects the forward method to be used. Returns: callable : The callable corresponding to the required forward method. \"\"\" if self . var_dim > 1 : # It operates as a typical dense layer if self . product_type == \"dense\" : return self . _forward_dense # It executes an inner product by parts between the outputs # of the subnetworks branch and trunk else : return self . _forward_pointwise else : return self . _forward_vanilla @property def _var_map ( self ) -> dict : # It checks all the data arrays in self.var_map have the same # batches dimension batches_dimensions = set ([ value . shape [ 0 ] for value in self . var_map . values ()]) assert ( len ( batches_dimensions ) == 1 ), \"This dataset is not proper to apply shuffling\" dim = list ( batches_dimensions )[ 0 ] indices = np . arange ( dim ) np . random . shuffle ( indices ) var_map_shuffled = { key : value [ indices ] for key , value in self . var_map . items ()} return var_map_shuffled @property def weights ( self ) -> list : return sum ([ net . weights for net in self . subnetworks ], []) # Now, a sequence of wrappers def _wrapper_bias_inactive ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return output def _wrapper_bias_active ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output_branch_ = output_branch [:, : - self . var_dim ] bias = output_branch [:, - self . var_dim :] output = ( self . _forward ( output_trunk = output_trunk , output_branch = output_branch_ ) + bias ) return output def _wrapper_decoder_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return self . decoder_network . forward ( input_data = input_data ) def _wrapper_decoder_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def _wrapper_rescale_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data * self . rescale_factors def _wrapper_rescale_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . 
decoder_wrapper ( input_data = output )) @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. \"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy () @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary () __init__ ( trunk_network = None , branch_network = None , decoder_network = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , model_id = None , use_bias = False ) # Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional). (Default value = None) None devices Union [ str , list ] Devices in which the model will be executed. (Default value = \"cpu\") 'cpu' product_type str Type of product to execute in the embedding space. (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. 
(Default value = None) None model_id str Name for the model (Default value = None) None use_bias bool (Default value = False) False Source code in simulai/models/_pytorch_models/_deeponet.py 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . __init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . 
trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . _wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ] eval ( trunk_data = None , branch_data = None ) # It uses the network to make evaluations. Parameters: Name Type Description Default trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. 
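An aside on the product_type="dense" option mentioned in the constructor: a standalone torch sketch of the shape bookkeeping it implies, mirroring the _forward_dense source shown on this page (batch size and dimensions below are illustrative only).

import torch

batch, var_dim, latent_dim = 8, 3, 50
output_trunk = torch.rand(batch, latent_dim)               # trunk embedding
output_branch = torch.rand(batch, var_dim * latent_dim)    # must be divisible by var_dim

# The trunk embedding acts as an interpolation basis for each of the var_dim outputs.
output = torch.matmul(
    output_branch.reshape(-1, var_dim, latent_dim),        # (batch, var_dim, latent_dim)
    output_trunk[..., None],                                # (batch, latent_dim, 1)
).squeeze()                                                 # (batch, var_dim)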
\"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy () eval_subnetwork ( name = None , input_data = None ) # It evaluates the output of DeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None input_data Union [ ndarray , Tensor ] The data used as input for the subnetwork. (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy () forward ( input_trunk = None , input_branch = None ) # Wrapper forward method. Parameters: Name Type Description Default input_trunk Union [ ndarray , Tensor ] (Default value = None) None input_branch Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description Tensor torch.Tensor: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . 
decoder_wrapper ( input_data = output )) ResDeepONet # Bases: DeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 class ResDeepONet ( DeepONet ): name = \"resdeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . _forward_cut_residual def _forward_default ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . 
Tensor : \"\"\"Forward method which considers the network a residual operation. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual def _forward_multiplied_by_trunk ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . Tensor : \"\"\"Forward method with multiplication by the trunk embedding. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual * input_trunk def _forward_cut_residual ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Forward method in which the residual operation is ignored. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return output __init__ ( trunk_network = None , branch_network = None , decoder_network = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , residual = True , multiply_by_trunk = False , model_id = None , use_bias = False ) # Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional) (Default value = None) None # Union [ str , list ] (Default value = \"cpu\") required product_type str Type of product to execute in the embedding space (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) None residual bool Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) True multiply_by_trunk bool Multiply the output by the trunk input or not. 
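Before the parameter table continues, a hedged usage note: with residual=True the forward pass adds the branch input to the DeepONet output, which is why the branch input dimension must equal var_dim. A sketch, assuming trunk_net and branch_net are already-built NetworkTemplate instances (not defined on this page):

from simulai.models import ResDeepONet

res_net = ResDeepONet(
    trunk_network=trunk_net,       # assumed, built elsewhere
    branch_network=branch_net,     # assumed, with branch_net.input_size == var_dim
    var_dim=3,
    residual=True,                 # forward: input_branch + DeepONet(trunk, branch)
    devices="cpu",
)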
NOTE: if the option 'residual' False is activated it is performed after the multiplication: output*trunk_input + branch_input (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . 
_forward_cut_residual ImprovedDeepONet # Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 class ImprovedDeepONet ( ResDeepONet ): name = \"improveddeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . 
__init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved def _forward_improved ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Improved forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution v = self . encoder_trunk . forward ( input_data = input_trunk ) u = self . encoder_branch . forward ( input_data = input_branch ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk , u = u , v = v ), device = self . device , ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch , u = u , v = v ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . numpy () ) def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Encoder Trunk:\" ) self . encoder_trunk . summary () print ( \"Encoder Branch:\" ) self . encoder_branch . summary () print ( \"Branch Network:\" ) self . branch_network . summary () __init__ ( trunk_network = None , branch_network = None , decoder_network = None , encoder_trunk = None , encoder_branch = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , residual = False , multiply_by_trunk = False , model_id = None , use_bias = False ) # The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. 
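A dimensional-compatibility sketch for the constructor documented next (see the Parameters table below): the trunk and branch hidden sizes and both encoder output sizes must all coincide. The ConvexDenseNetwork/DenseNetwork import paths and keyword names are assumptions, not taken from this page, and may differ across simulai versions.

from simulai.models import ImprovedDeepONet
from simulai.regression import ConvexDenseNetwork, DenseNetwork  # assumed import path

hidden = 50  # shared: trunk/branch hidden_size and both encoder output_size

trunk_net = ConvexDenseNetwork(layers_units=[hidden, hidden, hidden], activations="tanh",
                               input_size=1, output_size=hidden, name="trunk_net")
branch_net = ConvexDenseNetwork(layers_units=[hidden, hidden, hidden], activations="tanh",
                                input_size=2, output_size=hidden, name="branch_net")
encoder_trunk = DenseNetwork(layers_units=[hidden], activations="tanh",
                             input_size=1, output_size=hidden, name="encoder_trunk")
encoder_branch = DenseNetwork(layers_units=[hidden], activations="tanh",
                              input_size=2, output_size=hidden, name="encoder_branch")

improved = ImprovedDeepONet(trunk_network=trunk_net, branch_network=branch_net,
                            encoder_trunk=encoder_trunk, encoder_branch=encoder_branch,
                            var_dim=1, devices="cpu")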
Parameters: Name Type Description Default trunk_network ConvexDenseNetwork Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network ConvexDenseNetwork Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None encoder_trunk NetworkTemplate Shallow subnework used to map the trunk input to an auxiliary embedding None employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. 
(Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved eval_subnetwork ( name = None , trunk_data = None , branch_data = None ) # It evaluates the output of ImprovedDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . 
numpy () ) FlexibleDeepONet # Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 class FlexibleDeepONet ( ResDeepONet ): name = \"flexibledeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . 
__init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ] def _rescaling_operation ( self , input_data : torch . Tensor = None , rescaling_tensor : torch . Tensor = None ): angular = rescaling_tensor [:, : self . t_is ] linear = rescaling_tensor [:, self . t_is :] return angular * input_data + linear def _forward_flexible ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Flexible forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch ), device = self . device , ) rescaling = self . to_wrap ( entity = self . pre_network . forward ( input_data = input_branch ), device = self . device ) input_trunk_rescaled = self . _rescaling_operation ( input_data = input_trunk , rescaling_tensor = rescaling ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk_rescaled ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Pre Network:\" ) self . pre_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary () __init__ ( trunk_network = None , branch_network = None , decoder_network = None , pre_network = None , var_dim = 1 , devices = 'cpu' , product_type = None , rescale_factors = None , residual = False , multiply_by_trunk = False , model_id = None , use_bias = False ) # Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. 
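A small torch sketch of the affine rescaling driven by the pre-network, mirroring the _rescaling_operation source shown on this page: the pre-network must emit 2 * trunk_input_size values per sample, split into an "angular" and a "linear" part (batch size and trunk input size below are illustrative).

import torch

t_is = 1                                  # trunk input size (e.g. a time coordinate)
input_trunk = torch.rand(8, t_is)         # batch of trunk inputs
rescaling = torch.rand(8, 2 * t_is)       # pre_network output: [angular | linear]

angular, linear = rescaling[:, :t_is], rescaling[:, t_is:]
input_trunk_rescaled = angular * input_trunk + linear   # what the trunk network actually sees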
It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None pre_network NetworkTemplate Subnework used to predict rescaling parameters for the trunk input None accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. 
(Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ] eval_subnetwork ( name = None , trunk_data = None , branch_data = None ) # It evaluates the output of FlexibleDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . 
numpy ()","title":"Simulai models deeponet"},{"location":"simulai_models/simulai_models_deeponet/#deeponets","text":"","title":"DeepONets"},{"location":"simulai_models/simulai_models_deeponet/#deeponet","text":"Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_deeponet.py 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 class DeepONet ( NetworkTemplate ): name = \"deeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . 
__init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . _wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . 
input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ] def _latent_dimension_is_correct ( self , dim : Union [ int , tuple ]) -> bool : \"\"\"It checks if the latent dimension is consistent. Args: dim (Union[int, tuple]): Latent_space_dimension. Returns: bool: The confirmation about the dimensionality correctness. \"\"\" if type ( dim ) == int : return True elif type ( dim ) == tuple : if len ( tuple ) == 1 : return True else : return False def _bias_compatibility_is_correct ( self , dim_trunk : Union [ int , tuple ], dim_branch : Union [ int , tuple ] ) -> bool : assert dim_branch == dim_trunk + self . var_dim , ( \"When using bias, the dimension\" + \"of the branch output should be\" + \"trunk output + var_dim.\" ) def _forward_dense ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a matrix-like product, it means, the trunk network outputs serve as \"interpolation basis\" for the branch outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_branch . shape [ - 1 ] / self . var_dim ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , self . var_dim , latent_dim ) ) output = torch . matmul ( output_branch_reshaped , output_trunk [ ... , None ]) output = torch . squeeze ( output ) return output def _forward_pointwise ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product, after that a reshaping is applied in order to produce multiple outputs. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" latent_dim = int ( output_trunk . shape [ - 1 ] / self . var_dim ) output_trunk_reshaped = torch . reshape ( output_trunk , ( - 1 , latent_dim , self . var_dim ) ) output_branch_reshaped = torch . reshape ( output_branch , ( - 1 , latent_dim , self . var_dim ) ) output = torch . sum ( output_trunk_reshaped * output_branch_reshaped , dim =- 2 , keepdim = False ) return output def _forward_vanilla ( self , output_trunk : torch . Tensor = None , output_branch : torch . Tensor = None ) -> torch . Tensor : \"\"\"Forward method used when the embeddings are multiplied using a simple point-wise product. Args: output_trunk (torch.Tensor, optional): The embedding generated by the trunk network. (Default value = None) output_branch (torch.Tensor, optional): The embedding generated by the branch network. (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = torch . 
sum ( output_trunk * output_branch , dim =- 1 , keepdim = True ) return output def _forward_selector_ ( self ) -> callable : \"\"\"It selects the forward method to be used. Returns: callable : The callable corresponding to the required forward method. \"\"\" if self . var_dim > 1 : # It operates as a typical dense layer if self . product_type == \"dense\" : return self . _forward_dense # It executes an inner product by parts between the outputs # of the subnetworks branch and trunk else : return self . _forward_pointwise else : return self . _forward_vanilla @property def _var_map ( self ) -> dict : # It checks all the data arrays in self.var_map have the same # batches dimension batches_dimensions = set ([ value . shape [ 0 ] for value in self . var_map . values ()]) assert ( len ( batches_dimensions ) == 1 ), \"This dataset is not proper to apply shuffling\" dim = list ( batches_dimensions )[ 0 ] indices = np . arange ( dim ) np . random . shuffle ( indices ) var_map_shuffled = { key : value [ indices ] for key , value in self . var_map . items ()} return var_map_shuffled @property def weights ( self ) -> list : return sum ([ net . weights for net in self . subnetworks ], []) # Now, a sequence of wrappers def _wrapper_bias_inactive ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return output def _wrapper_bias_active ( self , output_trunk : Union [ np . ndarray , torch . Tensor ] = None , output_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : output_branch_ = output_branch [:, : - self . var_dim ] bias = output_branch [:, - self . var_dim :] output = ( self . _forward ( output_trunk = output_trunk , output_branch = output_branch_ ) + bias ) return output def _wrapper_decoder_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return self . decoder_network . forward ( input_data = input_data ) def _wrapper_decoder_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def _wrapper_rescale_active ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data * self . rescale_factors def _wrapper_rescale_inactive ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : return input_data def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . 
decoder_wrapper ( input_data = output )) @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. \"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy () @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary ()","title":"DeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.__init__","text":"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional). (Default value = None) None devices Union [ str , list ] Devices in which the model will be executed. (Default value = \"cpu\") 'cpu' product_type str Type of product to execute in the embedding space. (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) None model_id str Name for the model (Default value = None) None use_bias bool (Default value = False) False Source code in simulai/models/_pytorch_models/_deeponet.py 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . 
ndarray = None , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Classical Deep Operator Network (DeepONet), a deep learning version of the Universal Approximation Theorem. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional). (Default value = None) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( DeepONet , self ) . __init__ ( devices = devices ) # Determining the kind of device to be used for allocating the # subnetworks used in the DeepONet model self . device = self . _set_device ( devices = devices ) self . use_bias = use_bias self . trunk_network = self . to_wrap ( entity = trunk_network , device = self . device ) self . branch_network = self . to_wrap ( entity = branch_network , device = self . device ) self . add_module ( \"trunk_network\" , self . trunk_network ) self . add_module ( \"branch_network\" , self . branch_network ) if decoder_network is not None : self . decoder_network = self . to_wrap ( entity = decoder_network , device = self . device ) self . add_module ( \"decoder_network\" , self . decoder_network ) else : self . decoder_network = decoder_network self . product_type = product_type self . model_id = model_id self . var_dim = var_dim # Rescaling factors for the output if rescale_factors is not None : assert ( len ( rescale_factors ) == var_dim ), \"The number of rescaling factors must be equal to var_dim.\" rescale_factors = torch . from_numpy ( rescale_factors . astype ( \"float32\" )) self . rescale_factors = self . to_wrap ( entity = rescale_factors , device = self . device ) else : self . rescale_factors = None # Checking up whether the output of each subnetwork are in correct shape assert self . _latent_dimension_is_correct ( self . trunk_network . output_size ), ( \"The trunk network must have\" \" one-dimensional output , \" \"but received\" f \" { self . trunk_network . output_size } \" ) assert self . _latent_dimension_is_correct ( self . branch_network . output_size ), ( \"The branch network must have\" \" one-dimensional output,\" \" but received\" f \" { self . branch_network . output_size } \" ) # If bias is being used, check whether the network outputs are compatible. if self . use_bias : print ( \"Bias is being used.\" ) self . _bias_compatibility_is_correct ( dim_trunk = self . trunk_network . output_size , dim_branch = self . branch_network . output_size , ) self . bias_wrapper = self . _wrapper_bias_active else : self . bias_wrapper = self . _wrapper_bias_inactive # Using a decoder on top of the model or not if self . decoder_network is not None : self . decoder_wrapper = self . _wrapper_decoder_active else : self . decoder_wrapper = self . _wrapper_decoder_inactive # Using rescaling factors or not if rescale_factors is not None : self . rescale_wrapper = self . 
_wrapper_rescale_active else : self . rescale_wrapper = self . _wrapper_rescale_inactive # Checking the compatibility of the subnetworks outputs for each kind of product being employed. if self . product_type != \"dense\" : output_branch = self . branch_network . output_size output_trunk = self . trunk_network . output_size # It checks if the inner product operation can be performed. if not self . use_bias : assert output_branch == output_trunk , ( f \"The output dimensions for the sub-networks\" f \" trunk and branch must be equal but are\" f \" { output_branch } \" f \" and { output_trunk } \" ) else : print ( \"Bias compatibility was already verified.\" ) else : output_branch = self . branch_network . output_size assert not output_branch % self . var_dim , ( f \"The number of branch latent outputs must\" f \" be divisible by the number of variables,\" f \" but received { output_branch } \" f \" and { self . var_dim } \" ) self . subnetworks = [ net for net in [ self . trunk_network , self . branch_network , self . decoder_network ] if net is not None ] self . input_trunk = None self . input_branch = None self . output = None self . var_map = dict () # TODO Checking up if the input of the decoder network has the correct dimension if self . decoder_network is not None : print ( \"Decoder is being used.\" ) else : pass # Selecting the correct forward approach to be used self . _forward = self . _forward_selector_ () self . subnetworks_names = [ \"trunk\" , \"branch\" ]","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.eval","text":"It uses the network to make evaluations. Parameters: Name Type Description Default trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 @guarantee_device def eval ( self , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It uses the network to make evaluations. Args: trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The result of all the hidden operations in the network. \"\"\" output_tensor = self . forward ( input_trunk = trunk_data , input_branch = branch_data ) return output_tensor . cpu () . detach () . numpy ()","title":"eval()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.eval_subnetwork","text":"It evaluates the output of DeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None input_data Union [ ndarray , Tensor ] The data used as input for the subnetwork. (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 @guarantee_device def eval_subnetwork ( self , name : str = None , input_data : Union [ np . ndarray , torch . Tensor ] = None ) -> np . ndarray : \"\"\"It evaluates the output of DeepONet subnetworks. 
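As a complement to the constructor and forward pass documented above, the following is a minimal, illustrative sketch of assembling and evaluating a DeepONet. It is not part of the generated reference; the DenseNetwork configuration keys (layers_units, activations, input_size, output_size, name) are assumptions drawn from typical SimulAI examples and may differ between releases.

```python
# Illustrative sketch only: building and evaluating the DeepONet documented above.
# Configuration keys for DenseNetwork are assumptions and may vary across versions.
import numpy as np

from simulai.models import DeepONet
from simulai.regression import DenseNetwork

latent_dim = 50  # trunk and branch must share the same latent (output) dimension

trunk_config = {
    "layers_units": [64, 64, 64],
    "activations": "tanh",
    "input_size": 1,            # e.g. a time/space coordinate
    "output_size": latent_dim,
    "name": "trunk_net",
}
branch_config = {
    "layers_units": [64, 64, 64],
    "activations": "tanh",
    "input_size": 10,           # e.g. forcing/conditioning variables
    "output_size": latent_dim,
    "name": "branch_net",
}

net = DeepONet(
    trunk_network=DenseNetwork(**trunk_config),
    branch_network=DenseNetwork(**branch_config),
    var_dim=1,
    devices="cpu",
    model_id="deeponet_example",
)

trunk_data = np.linspace(0, 1, 128)[:, None].astype("float32")
branch_data = np.random.rand(128, 10).astype("float32")

# eval() runs the forward pass and returns a NumPy array of shape (n_samples, var_dim)
output = net.eval(trunk_data=trunk_data, branch_data=branch_data)
```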
Args: name (str, optional): Name of the subnetwork. (Default value = None) input_data (Union[np.ndarray, torch.Tensor], optional): The data used as input for the subnetwork. (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_to_be_used = getattr ( self , name + \"_network\" ) return network_to_be_used . forward ( input_data ) . cpu () . detach () . numpy ()","title":"eval_subnetwork()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.DeepONet.forward","text":"Wrapper forward method. Parameters: Name Type Description Default input_trunk Union [ ndarray , Tensor ] (Default value = None) None input_branch Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description Tensor torch.Tensor: The result of all the hidden operations in the network. Source code in simulai/models/_pytorch_models/_deeponet.py 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 def forward ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Wrapper forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The result of all the hidden operations in the network. \"\"\" # Forward method execution output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_trunk ), device = self . device ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_branch ), device = self . device ) # Wrappers are applied to execute user-defined operations. # When those operations are not selected, these wrappers simply # bypass the inputs. output = self . bias_wrapper ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = self . decoder_wrapper ( input_data = output ))","title":"forward()"},{"location":"simulai_models/simulai_models_deeponet/#resdeeponet","text":"Bases: DeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 class ResDeepONet ( DeepONet ): name = \"resdeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . 
ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . _forward_cut_residual def _forward_default ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . Tensor : \"\"\"Forward method which considers the network a residual operation. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual def _forward_multiplied_by_trunk ( self , input_trunk : torch . Tensor = None , input_branch : torch . Tensor = None , ) -> torch . Tensor : \"\"\"Forward method with multiplication by the trunk embedding. Args: input_trunk (torch.Tensor, optional): (Default value = None) input_branch (torch.Tensor, optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output_residual = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return input_branch + output_residual * input_trunk def _forward_cut_residual ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . 
Tensor ] = None , ) -> torch . Tensor : \"\"\"Forward method in which the residual operation is ignored. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" output = self . forward_ ( input_trunk = input_trunk , input_branch = input_branch ) return output","title":"ResDeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.ResDeepONet.__init__","text":"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs (Default value = None) None decoder_network NetworkTemplate Subnetworks for converting the embedding to the output (optional) (Default value = None) None # Union [ str , list ] (Default value = \"cpu\") required product_type str Type of product to execute in the embedding space (Default value = None) None rescale_factors ndarray Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) None residual bool Consider the DeepONet as a residual layer (sum the output to the branch input) or not. (Default value = True) True multiply_by_trunk bool Multiply the output by the trunk input or not. NOTE: if the option 'residual' False is activated it is performed after the multiplication: output*trunk_input + branch_input (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , # The decoder network is optional and considered var_dim : int = 1 , # less effective than the output reshaping alternative devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = True , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Residual Deep Operator Network (DeepONet) The operation performed is: output = input_branch + D(param, input_branch) Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs (Default value = None) decoder_network (NetworkTemplate, optional): Subnetworks for converting the embedding to the output (optional) (Default value = None) # (Union[str, list], optional): (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): Consider the DeepONet as a residual layer (sum the output to the branch input) or not. 
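For quick reference, the sketch below restates the three forward variants selected by the residual and multiply_by_trunk flags discussed above, with D standing for the plain DeepONet evaluation inherited through super().forward. It is an illustration of the documented behaviour, not additional library code.

```python
# Illustrative summary of the ResDeepONet forward variants documented above.
# D(...) denotes the underlying DeepONet evaluation (super().forward).
import torch


def forward_default(D, input_trunk: torch.Tensor, input_branch: torch.Tensor) -> torch.Tensor:
    # residual=True: the branch input must have var_dim columns so the sum is well defined
    return input_branch + D(input_trunk=input_trunk, input_branch=input_branch)


def forward_multiplied_by_trunk(D, input_trunk: torch.Tensor, input_branch: torch.Tensor) -> torch.Tensor:
    # multiply_by_trunk=True: the residual sum is applied after multiplying by the trunk input
    return input_branch + D(input_trunk=input_trunk, input_branch=input_branch) * input_trunk


def forward_cut_residual(D, input_trunk: torch.Tensor, input_branch: torch.Tensor) -> torch.Tensor:
    # neither option active: behaves as a plain DeepONet
    return D(input_trunk=input_trunk, input_branch=input_branch)
```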
(Default value = True) multiply_by_trunk (bool, optional): Multiply the output by the trunk input or not. NOTE: if the option 'residual' is activated it is performed after the multiplication: `output*trunk_input + branch_input` (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" super ( ResDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , # The decoder network is optional and considered var_dim = var_dim , # less effective than the output reshaping alternative devices = devices , product_type = product_type , rescale_factors = rescale_factors , model_id = model_id , use_bias = use_bias , ) input_dim = self . branch_network . input_size self . forward_ = super () . forward if residual == True : assert input_dim == var_dim , ( \"For a residual network, it is necessary to have \" \"size of branch_network input equal to var_dim, but \" f \"received { input_dim } and { var_dim } .\" ) self . forward = self . _forward_default elif multiply_by_trunk == True : self . forward = self . _forward_multiplied_by_trunk else : self . forward = self . _forward_cut_residual","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#improveddeeponet","text":"Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 class ImprovedDeepONet ( ResDeepONet ): name = \"improveddeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. 
(Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved def _forward_improved ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Improved forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution v = self . encoder_trunk . forward ( input_data = input_trunk ) u = self . encoder_branch . forward ( input_data = input_branch ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk , u = u , v = v ), device = self . device , ) output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch , u = u , v = v ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. 
(Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . numpy () ) def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Encoder Trunk:\" ) self . encoder_trunk . summary () print ( \"Encoder Branch:\" ) self . encoder_branch . summary () print ( \"Branch Network:\" ) self . branch_network . summary ()","title":"ImprovedDeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.ImprovedDeepONet.__init__","text":"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Parameters: Name Type Description Default trunk_network ConvexDenseNetwork Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network ConvexDenseNetwork Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None encoder_trunk NetworkTemplate Shallow subnework used to map the trunk input to an auxiliary embedding None employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 def __init__ ( self , trunk_network : ConvexDenseNetwork = None , branch_network : ConvexDenseNetwork = None , decoder_network : NetworkTemplate = None , encoder_trunk : NetworkTemplate = None , encoder_branch : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . 
ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"The so-called Improved DeepONet architecture aims at enhancing the communication between the trunk and branch pipelines during the training process, thus allowing better generalization capabilities for the composite model. Args: trunk_network (ConvexDenseNetwork, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (ConvexDenseNetwork, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) encoder_trunk (NetworkTemplate, optional): Shallow subnework used to map the trunk input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) encoder_branch (NetworkTemplate, optional): Shallow subnework used to map the branch input to an auxiliary embedding employed in combination with the hidden spaces. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the encoders and the branch and trunk networks t_hs = trunk_network . hidden_size et_os = encoder_trunk . output_size b_hs = branch_network . hidden_size eb_os = encoder_branch . output_size assert t_hs == et_os == b_hs == eb_os , ( \"The output of the trunk encoder must have the same dimension\" \" of the trunk network hidden size, but got\" f \" { encoder_trunk . output_size } and { trunk_network . hidden_size } \" ) super ( ImprovedDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . encoder_trunk = self . to_wrap ( entity = encoder_trunk , device = self . device ) self . encoder_branch = self . to_wrap ( entity = encoder_branch , device = self . device ) self . add_module ( \"encoder_trunk\" , self . encoder_trunk ) self . add_module ( \"encoder_branch\" , self . encoder_branch ) self . forward_ = self . _forward_improved","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.ImprovedDeepONet.eval_subnetwork","text":"It evaluates the output of ImprovedDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. 
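A hedged sketch of how the compatibility rule enforced above (trunk hidden size, branch hidden size, and both encoder output sizes all equal) is typically satisfied when assembling an ImprovedDeepONet. The ConvexDenseNetwork and DenseNetwork configuration keys are assumptions based on common SimulAI examples and may differ in your installed version.

```python
# Illustrative sketch: wiring an ImprovedDeepONet so that
# trunk.hidden_size == encoder_trunk.output_size == branch.hidden_size == encoder_branch.output_size,
# which is the compatibility check performed in __init__ above.
# Configuration keys are assumptions, not a verified API contract.
from simulai.models import ImprovedDeepONet
from simulai.regression import ConvexDenseNetwork, DenseNetwork

hidden = 64   # shared hidden size, matched by both encoder output sizes
latent = 50   # latent dimension shared by the trunk and branch outputs

trunk = ConvexDenseNetwork(layers_units=[hidden, hidden, hidden], activations="tanh",
                           input_size=1, output_size=latent, name="trunk_net")
branch = ConvexDenseNetwork(layers_units=[hidden, hidden, hidden], activations="tanh",
                            input_size=10, output_size=latent, name="branch_net")
encoder_trunk = DenseNetwork(layers_units=[hidden], activations="tanh",
                             input_size=1, output_size=hidden, name="encoder_trunk")
encoder_branch = DenseNetwork(layers_units=[hidden], activations="tanh",
                              input_size=10, output_size=hidden, name="encoder_branch")

net = ImprovedDeepONet(trunk_network=trunk, branch_network=branch,
                       encoder_trunk=encoder_trunk, encoder_branch=encoder_branch,
                       var_dim=1, devices="cpu")
```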
Source code in simulai/models/_pytorch_models/_deeponet.py 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of ImprovedDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] v = self . encoder_trunk . forward ( input_data = trunk_data ) u = self . encoder_branch . forward ( input_data = branch_data ) return ( network_instance . forward ( input_data = input_data , u = u , v = v ) . cpu () . detach () . numpy () )","title":"eval_subnetwork()"},{"location":"simulai_models/simulai_models_deeponet/#flexibledeeponet","text":"Bases: ResDeepONet Source code in simulai/models/_pytorch_models/_deeponet.py 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 class FlexibleDeepONet ( ResDeepONet ): name = \"flexibledeeponet\" engine = \"torch\" def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). (Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. 
(Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ] def _rescaling_operation ( self , input_data : torch . Tensor = None , rescaling_tensor : torch . Tensor = None ): angular = rescaling_tensor [:, : self . t_is ] linear = rescaling_tensor [:, self . t_is :] return angular * input_data + linear def _forward_flexible ( self , input_trunk : Union [ np . ndarray , torch . Tensor ] = None , input_branch : Union [ np . ndarray , torch . Tensor ] = None , ) -> torch . Tensor : \"\"\"Flexible forward method. Args: input_trunk (Union[np.ndarray, torch.Tensor], optional): (Default value = None) input_branch (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: torch.Tensor: The product between the two embeddings. \"\"\" # Forward method execution output_branch = self . to_wrap ( entity = self . branch_network . forward ( input_data = input_branch ), device = self . device , ) rescaling = self . to_wrap ( entity = self . pre_network . forward ( input_data = input_branch ), device = self . device ) input_trunk_rescaled = self . _rescaling_operation ( input_data = input_trunk , rescaling_tensor = rescaling ) output_trunk = self . to_wrap ( entity = self . trunk_network . forward ( input_data = input_trunk_rescaled ), device = self . device , ) output = self . _forward ( output_trunk = output_trunk , output_branch = output_branch ) return self . rescale_wrapper ( input_data = output ) @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . 
subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . numpy () def summary ( self ) -> None : print ( \"Trunk Network:\" ) self . trunk_network . summary () print ( \"Pre Network:\" ) self . pre_network . summary () print ( \"Branch Network:\" ) self . branch_network . summary ()","title":"FlexibleDeepONet"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.FlexibleDeepONet.__init__","text":"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Parameters: Name Type Description Default trunk_network NetworkTemplate Subnetwork for processing the coordinates inputs. (Default value = None) None branch_network NetworkTemplate Subnetwork for processing the forcing/conditioning inputs. (Default value = None) None decoder_network NetworkTemplate Subnetwork for converting the embedding to the output (optional). (Default value = None) None pre_network NetworkTemplate Subnework used to predict rescaling parameters for the trunk input None accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) Source code in simulai/models/_pytorch_models/_deeponet.py 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 def __init__ ( self , trunk_network : NetworkTemplate = None , branch_network : NetworkTemplate = None , decoder_network : NetworkTemplate = None , pre_network : NetworkTemplate = None , var_dim : int = 1 , devices : Union [ str , list ] = \"cpu\" , product_type : str = None , rescale_factors : np . ndarray = None , residual : bool = False , multiply_by_trunk : bool = False , model_id : str = None , use_bias : bool = False , ) -> None : \"\"\"Flexible DeepONet uses a subnetwork called 'pre-network', which plays the role of rescaling the trunk input according to the branch input. It is an attempt of reducing the training bias related to the different orders of magnitude contained in the dataset. Args: trunk_network (NetworkTemplate, optional): Subnetwork for processing the coordinates inputs. (Default value = None) branch_network (NetworkTemplate, optional): Subnetwork for processing the forcing/conditioning inputs. (Default value = None) decoder_network (NetworkTemplate, optional): Subnetwork for converting the embedding to the output (optional). 
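The sketch below restates the pre-network rescaling performed by FlexibleDeepONet, as described above: the pre-network maps the branch input to 2 * t_is values, whose first half multiplies ("angular" part) and second half shifts ("linear" part) the trunk input before the trunk network is evaluated. It mirrors the documented _rescaling_operation and is shown only for illustration.

```python
# Illustrative restatement of the FlexibleDeepONet pre-network rescaling documented above.
import torch


def rescale_trunk_input(input_trunk: torch.Tensor, rescaling: torch.Tensor, t_is: int) -> torch.Tensor:
    angular = rescaling[:, :t_is]   # multiplicative coefficients predicted by the pre-network
    linear = rescaling[:, t_is:]    # additive offsets predicted by the pre-network
    return angular * input_trunk + linear


# Dimension rule enforced in __init__:
#   pre_network.input_size  == branch_network.input_size
#   pre_network.output_size == 2 * trunk_network.input_size
```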
(Default value = None) pre_network (NetworkTemplate, optional): Subnework used to predict rescaling parameters for the trunk input accordingly the branch input. (Default value = None) var_dim (int, optional): Number of output variables. (Default value = 1) devices (Union[str, list], optional): Devices in which the model will be executed. (Default value = \"cpu\") product_type (str, optional): Type of product to execute in the embedding space. (Default value = None) rescale_factors (np.ndarray, optional): Values used for rescaling the network outputs for a given order of magnitude. (Default value = None) residual (bool, optional): (Default value = False) multiply_by_trunk (bool, optional): (Default value = False) model_id (str, optional): Name for the model (Default value = None) use_bias (bool, optional): (Default value = False) \"\"\" # Guaranteeing the compatibility between the pre and the branch and trunk networks t_is = trunk_network . input_size p_is = pre_network . input_size p_os = pre_network . output_size b_is = branch_network . input_size assert ( 2 * t_is == p_os ) and ( b_is == p_is ), ( \"The input of branch and pre networks must have the same dimension\" \" and the output of pre and the input of trunks, too, but got\" f \" { ( b_is , p_is ) } and { ( t_is , p_os ) } .\" ) self . t_is = t_is super ( FlexibleDeepONet , self ) . __init__ ( trunk_network = trunk_network , branch_network = branch_network , decoder_network = decoder_network , var_dim = var_dim , devices = devices , product_type = product_type , rescale_factors = rescale_factors , residual = residual , multiply_by_trunk = multiply_by_trunk , model_id = model_id , use_bias = use_bias , ) self . pre_network = self . to_wrap ( entity = pre_network , device = self . device ) self . forward_ = self . _forward_flexible self . subnetworks += [ self . pre_network ] self . subnetworks_names += [ \"pre\" ]","title":"__init__()"},{"location":"simulai_models/simulai_models_deeponet/#simulai.models.FlexibleDeepONet.eval_subnetwork","text":"It evaluates the output of FlexibleDeepONet subnetworks. Parameters: Name Type Description Default name str Name of the subnetwork. (Default value = None) None trunk_data Union [ ndarray , Tensor ] (Default value = None) None branch_data Union [ ndarray , Tensor ] (Default value = None) None Returns: Type Description ndarray np.ndarray: The evaluation performed by the subnetwork. Source code in simulai/models/_pytorch_models/_deeponet.py 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 @guarantee_device def eval_subnetwork ( self , name : str = None , trunk_data : Union [ np . ndarray , torch . Tensor ] = None , branch_data : Union [ np . ndarray , torch . Tensor ] = None , ) -> np . ndarray : \"\"\"It evaluates the output of FlexibleDeepONet subnetworks. Args: name (str, optional): Name of the subnetwork. (Default value = None) trunk_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) branch_data (Union[np.ndarray, torch.Tensor], optional): (Default value = None) Returns: np.ndarray: The evaluation performed by the subnetwork. \"\"\" assert ( name in self . subnetworks_names ), f \"The name { name } is not a subnetwork of { self } .\" # Pre and branch network has the same input pre_data = branch_data network_instance = getattr ( self , name + \"_network\" ) input_data = locals ()[ name + \"_data\" ] return network_instance . forward ( input_data = input_data ) . cpu () . detach () . 
numpy ()","title":"eval_subnetwork()"},{"location":"simulai_models/simulai_models_transformer/","text":"red { color: red } Transformer # Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_transformer.py 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 class Transformer ( NetworkTemplate ): def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. (Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. (Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . 
encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self ) __init__ ( num_heads_encoder = 1 , num_heads_decoder = 1 , embed_dim_encoder = Union [ int , Tuple ], embed_dim_decoder = Union [ int , Tuple ], encoder_activation = 'relu' , decoder_activation = 'relu' , encoder_mlp_layer_config = None , decoder_mlp_layer_config = None , number_of_encoders = 1 , number_of_decoders = 1 ) # A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Parameters: Name Type Description Default num_heads_encoder int The number of heads for the self-attention layer of the encoder. (Default value = 1) 1 num_heads_decoder int The number of heads for the self-attention layer of the decoder. (Default value = 1) 1 embed_dim_encoder int The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] embed_dim_decoder int The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] encoder_activation Union [ str , Module ] The activation to be used in all the encoder layers. (Default value = 'relu') 'relu' decoder_activation Union [ str , Module ] The activation to be used in all the decoder layers. (Default value = 'relu') 'relu' encoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None decoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None number_of_encoders int The number of encoders to be used. (Default value = 1) 1 number_of_decoders int The number of decoders to be used. 
(Default value = 1) 1 Source code in simulai/models/_pytorch_models/_transformer.py 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. (Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. (Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . 
encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d ) forward ( input_data = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input dataset. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The transformer output. Source code in simulai/models/_pytorch_models/_transformer.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output summary () # It prints a general view of the architecture. Source code in simulai/models/_pytorch_models/_transformer.py 296 297 298 299 def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self )","title":"Simulai models transformer"},{"location":"simulai_models/simulai_models_transformer/#transformer","text":"Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_transformer.py 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 class Transformer ( NetworkTemplate ): def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. 
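As an illustration of the constructor documented above, the sketch below configures a small encoder-decoder Transformer. The keys of the MLP configuration dictionary are assumptions based on typical DenseNetwork usage in SimulAI and may need adjustment for a given release.

```python
# Illustrative sketch only: configuring the encoder-decoder Transformer documented above.
from simulai.models import Transformer

embed_dim = 32

# Configuration dictionary used to instantiate each encoder/decoder MLP block
# (keys assumed from common DenseNetwork usage).
mlp_config = {
    "layers_units": [64, 64],
    "activations": "relu",
    "input_size": embed_dim,
    "output_size": embed_dim,
    "name": "mlp_layer",
}

transformer = Transformer(
    num_heads_encoder=4,
    num_heads_decoder=4,
    embed_dim_encoder=embed_dim,
    embed_dim_decoder=embed_dim,
    encoder_activation="relu",
    decoder_activation="relu",
    encoder_mlp_layer_config=mlp_config,
    decoder_mlp_layer_config=mlp_config,
    number_of_encoders=2,
    number_of_decoders=2,
)
transformer.summary()  # prints a general view of the architecture
```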
(Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. (Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . 
DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self )","title":"Transformer"},{"location":"simulai_models/simulai_models_transformer/#simulai.models.Transformer.__init__","text":"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Parameters: Name Type Description Default num_heads_encoder int The number of heads for the self-attention layer of the encoder. (Default value = 1) 1 num_heads_decoder int The number of heads for the self-attention layer of the decoder. (Default value = 1) 1 embed_dim_encoder int The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] embed_dim_decoder int The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) Union [ int , Tuple ] encoder_activation Union [ str , Module ] The activation to be used in all the encoder layers. (Default value = 'relu') 'relu' decoder_activation Union [ str , Module ] The activation to be used in all the decoder layers. (Default value = 'relu') 'relu' encoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None decoder_mlp_layer_config dict A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) None number_of_encoders int The number of encoders to be used. (Default value = 1) 1 number_of_decoders int The number of decoders to be used. (Default value = 1) 1 Source code in simulai/models/_pytorch_models/_transformer.py 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def __init__ ( self , num_heads_encoder : int = 1 , num_heads_decoder : int = 1 , embed_dim_encoder : int = Union [ int , Tuple ], embed_dim_decoder : int = Union [ int , Tuple ], encoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , decoder_activation : Union [ str , torch . nn . Module ] = \"relu\" , encoder_mlp_layer_config : dict = None , decoder_mlp_layer_config : dict = None , number_of_encoders : int = 1 , number_of_decoders : int = 1 , ) -> None : r \"\"\"A classical encoder-decoder transformer: Graphical example: Example: U -> ( Encoder_1 -> Encoder_2 -> ... -> Encoder_N ) -> u_e (u_e, U) -> ( Decoder_1 -> Decoder_2 -> ... Decoder_N ) -> V Args: num_heads_encoder (int, optional): The number of heads for the self-attention layer of the encoder. (Default value = 1) num_heads_decoder (int, optional): The number of heads for the self-attention layer of the decoder. (Default value = 1) embed_dim_encoder (int, optional): The dimension of the embedding for the encoder. (Default value = Union[int, Tuple]) embed_dim_decoder (int, optional): The dimension of the embedding for the decoder. (Default value = Union[int, Tuple]) encoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the encoder layers. 
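Aside: a minimal instantiation sketch for the constructor documented in this entry. Everything below is illustrative rather than prescribed by this page: the dictionary keys handed to DenseNetwork (layers_units, activations, input_size, output_size, name) are assumptions about its expected configuration, and all numeric values (head counts, embedding sizes, batch shape) are placeholders chosen only so that the embedding dimension is divisible by the number of attention heads.

import numpy as np
from simulai.models import Transformer

# Hypothetical DenseNetwork configuration reused for every encoder/decoder MLP block.
# The key names are assumptions about the dictionary DenseNetwork expects.
mlp_config = {
    "layers_units": [64, 64],
    "activations": "tanh",
    "input_size": 32,
    "output_size": 32,
    "name": "internal_mlp",
}

transformer = Transformer(
    num_heads_encoder=4,
    num_heads_decoder=4,
    embed_dim_encoder=32,
    embed_dim_decoder=32,
    encoder_activation="relu",
    decoder_activation="relu",
    encoder_mlp_layer_config=mlp_config,
    decoder_mlp_layer_config=mlp_config,
    number_of_encoders=2,
    number_of_decoders=2,
)

# Toy forward pass; the (n_samples, embed_dim_encoder) input layout is an assumption.
estimate = transformer.forward(input_data=np.random.rand(100, 32))

Note that, as the loop in the source listing shows, the same encoder_mlp_layer_config dictionary is unpacked into a fresh DenseNetwork for each of the number_of_encoders encoder blocks (and likewise for the decoders), so every block gets an independent copy of the MLP.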
(Default value = 'relu') decoder_activation (Union[str, torch.nn.Module], optional): The activation to be used in all the decoder layers. (Default value = 'relu') encoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) decoder_mlp_layer_config (dict, optional): A configuration dictionary to instantiate the encoder MLP layer.weights (Default value = None) number_of_encoders (int, optional): The number of encoders to be used. (Default value = 1) number_of_decoders (int, optional): The number of decoders to be used. (Default value = 1) \"\"\" super ( Transformer , self ) . __init__ () self . num_heads_encoder = num_heads_encoder self . num_heads_decoder = num_heads_decoder self . embed_dim_encoder = embed_dim_encoder self . embed_dim_decoder = embed_dim_decoder self . encoder_mlp_layer_dict = encoder_mlp_layer_config self . decoder_mlp_layer_dict = decoder_mlp_layer_config self . number_of_encoders = number_of_encoders self . number_of_decoders = number_of_encoders self . encoder_activation = encoder_activation self . decoder_activation = decoder_activation self . encoder_mlp_layers_list = list () self . decoder_mlp_layers_list = list () # Creating independent copies for the MLP layers which will be used # by the multiple encoders/decoders. for e in range ( self . number_of_encoders ): self . encoder_mlp_layers_list . append ( DenseNetwork ( ** self . encoder_mlp_layer_dict ) ) for d in range ( self . number_of_decoders ): self . decoder_mlp_layers_list . append ( DenseNetwork ( ** self . decoder_mlp_layer_dict ) ) # Defining the encoder architecture self . EncoderStage = torch . nn . Sequential ( * [ BasicEncoder ( num_heads = self . num_heads_encoder , activation = self . encoder_activation , mlp_layer = self . encoder_mlp_layers_list [ e ], embed_dim = self . embed_dim_encoder , ) for e in range ( self . number_of_encoders ) ] ) # Defining the decoder architecture self . DecoderStage = torch . nn . ModuleList ( [ BasicDecoder ( num_heads = self . num_heads_decoder , activation = self . decoder_activation , mlp_layer = self . decoder_mlp_layers_list [ d ], embed_dim = self . embed_dim_decoder , ) for d in range ( self . number_of_decoders ) ] ) self . weights = list () for e , encoder_e in enumerate ( self . EncoderStage ): self . weights += encoder_e . weights self . add_module ( f \"encoder_ { e } \" , encoder_e ) for d , decoder_d in enumerate ( self . DecoderStage ): self . weights += decoder_d . weights self . add_module ( f \"decoder_ { d } \" , decoder_d )","title":"__init__()"},{"location":"simulai_models/simulai_models_transformer/#simulai.models.Transformer.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input dataset. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The transformer output. Source code in simulai/models/_pytorch_models/_transformer.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input dataset. (Default value = None) Returns: torch.Tensor: The transformer output. \"\"\" encoder_output = self . EncoderStage ( input_data ) current_input = input_data for decoder in self . 
DecoderStage : output = decoder ( input_data = current_input , encoder_output = encoder_output ) current_input = output return output","title":"forward()"},{"location":"simulai_models/simulai_models_transformer/#simulai.models.Transformer.summary","text":"It prints a general view of the architecture. Source code in simulai/models/_pytorch_models/_transformer.py 296 297 298 299 def summary ( self ): \"\"\"It prints a general view of the architecture.\"\"\" print ( self )","title":"summary()"},{"location":"simulai_models/simulai_models_unet/","text":"red { color: red } U-Net # Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_unet.py 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 class UNet ( NetworkTemplate ): def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. (Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . 
intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self ) __init__ ( layers_config = None , intermediary_outputs_indices = None , intermediary_inputs_indices = None , encoder_extra_args = dict (), decoder_extra_args = dict ()) # U-Net. Parameters: Name Type Description Default layers_config Dict A dictionary containing the complete configuration for the None intermediary_outputs_indices List [ int ] A list of indices for indicating the encoder outputs. (Default value = None) None intermediary_inputs_indices List [ int ] A list of indices for indicating the decoder inputs. (Default value = None) None encoder_extra_args Dict A dictionary containing extra arguments for the encoder. (Default value = dict()) dict () decoder_extra_args Dict A dictionary containing extra arguments for the decoder. (Default value = dict()) dict () Source code in simulai/models/_pytorch_models/_unet.py 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. (Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . 
get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) forward ( input_data = None ) # The U-Net forward method. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] A dataset to be inputted in the CNN U-Net encoder. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The U-Net output. Source code in simulai/models/_pytorch_models/_unet.py 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output summary () # It shows a general view of the architecture. Source code in simulai/models/_pytorch_models/_unet.py 280 281 282 283 def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self )","title":"Simulai models unet"},{"location":"simulai_models/simulai_models_unet/#u-net","text":"Bases: NetworkTemplate Source code in simulai/models/_pytorch_models/_unet.py 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 class UNet ( NetworkTemplate ): def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. 
(Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self )","title":"U-Net"},{"location":"simulai_models/simulai_models_unet/#simulai.models.UNet.__init__","text":"U-Net. Parameters: Name Type Description Default layers_config Dict A dictionary containing the complete configuration for the None intermediary_outputs_indices List [ int ] A list of indices for indicating the encoder outputs. (Default value = None) None intermediary_inputs_indices List [ int ] A list of indices for indicating the decoder inputs. (Default value = None) None encoder_extra_args Dict A dictionary containing extra arguments for the encoder. (Default value = dict()) dict () decoder_extra_args Dict A dictionary containing extra arguments for the decoder. 
(Default value = dict()) dict () Source code in simulai/models/_pytorch_models/_unet.py 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_config : Dict = None , intermediary_outputs_indices : List [ int ] = None , intermediary_inputs_indices : List [ int ] = None , encoder_extra_args : Dict = dict (), decoder_extra_args : Dict = dict (), ) -> None : \"\"\"U-Net. Args: layers_config (Dict, optional): A dictionary containing the complete configuration for the U-Net encoder and decoder. (Default value = None) intermediary_outputs_indices (List[int], optional): A list of indices for indicating the encoder outputs. (Default value = None) intermediary_inputs_indices (List[int], optional): A list of indices for indicating the decoder inputs. (Default value = None) encoder_extra_args (Dict, optional): A dictionary containing extra arguments for the encoder. (Default value = dict()) decoder_extra_args (Dict, optional): A dictionary containing extra arguments for the decoder. (Default value = dict()) \"\"\" super ( UNet , self ) . __init__ () self . layers_config = layers_config self . intermediary_outputs_indices = intermediary_outputs_indices self . intermediary_inputs_indices = intermediary_inputs_indices self . layers_config_encoder = self . layers_config [ \"encoder\" ] self . layers_config_decoder = self . layers_config [ \"decoder\" ] self . encoder_activations = self . layers_config [ \"encoder_activations\" ] self . decoder_activations = self . layers_config [ \"decoder_activations\" ] self . encoder_horizontal_outputs = dict () # Configuring the encoder encoder_type = self . layers_config_encoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . encoder = CNNUnetEncoder ( layers = self . layers_config_encoder [ \"architecture\" ], activations = self . encoder_activations , intermediary_outputs_indices = self . intermediary_outputs_indices , case = \"2d\" , name = \"encoder\" , ** encoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) # Configuring the decoder decoder_type = self . layers_config_decoder . get ( \"type\" ) layers_config_encoder = self . layers_config_encoder . get ( \"architecture\" ) if encoder_type == \"cnn\" : self . decoder = CNNUnetDecoder ( layers = self . layers_config_decoder [ \"architecture\" ], activations = self . decoder_activations , intermediary_inputs_indices = self . intermediary_inputs_indices , case = \"2d\" , name = \"decoder\" , ** decoder_extra_args , ) else : raise Exception ( f \"Option { encoder_type } is not available.\" ) self . add_module ( \"encoder\" , self . encoder ) self . add_module ( \"decoder\" , self . decoder )","title":"__init__()"},{"location":"simulai_models/simulai_models_unet/#simulai.models.UNet.forward","text":"The U-Net forward method. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] A dataset to be inputted in the CNN U-Net encoder. (Default value = None) None Returns: Type Description Tensor torch.Tensor: The U-Net output. 
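Aside: a configuration skeleton for the U-Net documented on this page. Only the top-level keys (encoder, decoder, encoder_activations, decoder_activations, type, architecture) come from the source shown here; the per-layer dictionaries expected inside architecture belong to CNNUnetEncoder/CNNUnetDecoder and are left as empty placeholders, and the index lists are illustrative assumptions, so this sketch is not runnable until those placeholders are filled in.

import numpy as np
from simulai.models import UNet

# Placeholder layer lists: the concrete convolutional-layer dictionaries expected by
# CNNUnetEncoder/CNNUnetDecoder are not documented on this page and are omitted here.
encoder_layers: list = []
decoder_layers: list = []

layers_config = {
    "encoder": {"type": "cnn", "architecture": encoder_layers},
    "decoder": {"type": "cnn", "architecture": decoder_layers},
    "encoder_activations": [],  # assumed: one activation per encoder layer
    "decoder_activations": [],  # assumed: one activation per decoder layer
}

unet = UNet(
    layers_config=layers_config,
    intermediary_outputs_indices=[0, 2, 4],  # which encoder outputs are exposed (illustrative)
    intermediary_inputs_indices=[4, 2, 0],   # where the decoder consumes them (illustrative)
)

# The forward pass routes the encoder's main output plus the selected intermediary
# outputs into the decoder, as the source code below shows.
# estimate = unet.forward(input_data=np.random.rand(8, 1, 64, 64))  # assumed (N, C, H, W) layout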
Source code in simulai/models/_pytorch_models/_unet.py 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"The U-Net forward method. Args: input_data (Union[torch.Tensor, np.ndarray], optional): A dataset to be inputted in the CNN U-Net encoder. (Default value = None) Returns: torch.Tensor: The U-Net output. \"\"\" encoder_main_output , encoder_intermediary_outputs = self . encoder ( input_data = input_data ) output = self . decoder ( input_data = encoder_main_output , intermediary_encoder_outputs = encoder_intermediary_outputs , ) return output","title":"forward()"},{"location":"simulai_models/simulai_models_unet/#simulai.models.UNet.summary","text":"It shows a general view of the architecture. Source code in simulai/models/_pytorch_models/_unet.py 280 281 282 283 def summary ( self ): \"\"\"It shows a general view of the architecture.\"\"\" print ( self )","title":"summary()"},{"location":"simulai_optimization/simulai_losses/","text":"Loss Functions # RMSELoss # Bases: LossBasics Source code in simulai/optimization/_losses.py 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 class RMSELoss ( LossBasics ): def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Vanilla mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . loss_states = { \"loss\" : list ()} def _data_loss ( self , output_tilde : torch . Tensor = None , norm_value : torch . Tensor = None , target_data_tensor : torch . Tensor = None , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator norm_value (torch.Tensor): the value used for normalizing the loss evaluation target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value for a given state \"\"\" if norm_value is not None : data_loss = torch . mean ( torch . square (( output_tilde - target_data_tensor ) / norm_value ) ) else : data_loss = torch . mean ( torch . square (( output_tilde - target_data_tensor ))) return data_loss def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . 
Tensor = None , call_back : str = \"\" , norm_value : list = None , lambda_1 : float = 0.0 , device : str = \"cpu\" , lambda_2 : float = 0.0 , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' Returns: Callable: the closure function used for evaluating the loss value \"\"\" l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) def closure (): output_tilde = self . operator . forward ( ** input_data ) data_loss = self . _data_loss ( output_tilde = output_tilde , norm_value = norm_value , target_data_tensor = target_data , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = data_loss + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . flush () return loss return closure __call__ ( input_data = None , target_data = None , call_back = '' , norm_value = None , lambda_1 = 0.0 , device = 'cpu' , lambda_2 = 0.0 ) # Main function for generating complete loss function workflow Parameters: Name Type Description Default input_data Union [ dict , Tensor ] the data used as input for self.operator None target_data Tensor the target data used for training self.oeprator None call_back str a string used for composing the logging of the optimization process '' norm_value list a list of values used for normalizing the loss temrms None lambda_1 float the penalty for the L^1 regularization term 0.0 lambda_2 float the penalty for the L^2 regularization term 0.0 device str the device in which the loss evaluation will be executed, 'cpu' or 'gpu' 'cpu' Returns: Name Type Description Callable Callable the closure function used for evaluating the loss Callable value Source code in simulai/optimization/_losses.py 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . 
Tensor = None , call_back : str = \"\" , norm_value : list = None , lambda_1 : float = 0.0 , device : str = \"cpu\" , lambda_2 : float = 0.0 , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' Returns: Callable: the closure function used for evaluating the loss value \"\"\" l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) def closure (): output_tilde = self . operator . forward ( ** input_data ) data_loss = self . _data_loss ( output_tilde = output_tilde , norm_value = norm_value , target_data_tensor = target_data , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = data_loss + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . flush () return loss return closure __init__ ( operator = None ) # Vanilla mean-squared error loss function Parameters: Name Type Description Default operator Module the operator used for evaluating the loss function (usually a neural network) None Source code in simulai/optimization/_losses.py 164 165 166 167 168 169 170 171 172 173 174 def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Vanilla mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . 
loss_states = { \"loss\" : list ()} WRMSELoss # Bases: LossBasics Source code in simulai/optimization/_losses.py 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 class WRMSELoss ( LossBasics ): def __init__ ( self , operator = None ): \"\"\"Weighted mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . split_dim = 1 self . tol = 1e-25 self . loss_evaluator = None self . norm_evaluator = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . loss_states = { \"loss\" : list ()} def _data_loss ( self , output_tilde : torch . Tensor = None , weights : list = None , target_data_tensor : torch . Tensor = None , axis : int = - 1 , ) -> List : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator norm_value (torch.Tensor): the value used for normalizing the loss evaluation target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value for a given state \"\"\" output_split = torch . split ( output_tilde , self . split_dim , dim = axis ) target_split = torch . split ( target_data_tensor , self . split_dim , dim = axis ) data_losses = [ weights [ i ] * self . loss_evaluator ( out_split - tgt_split ) / self . norm_evaluator ( tgt_split ) for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] return data_losses def _no_data_loss_wrapper ( self , output_tilde : torch . Tensor = None , weights : list = None , target_data_tensor : torch . Tensor = None , axis : int = - 1 , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error without considering causality preserving Args: output_tilde (torch.Tensor): the output generated by self.operator weights (list): weights for rescaling each variable outputted by self.operator target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde axis (int): the axis in which the variables are split Returns: torch.Tensor: the loss function value for a given state \"\"\" return self . data_loss ( output_tilde = output_tilde , weights = weights , target_data_tensor = target_data_tensor , axis = axis , ) def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . 
Tensor = None , call_back : str = \"\" , lambda_1 : float = 0.0 , lambda_2 : float = 0.0 , axis : int = - 1 , relative : bool = False , device : str = \"cpu\" , weights : list = None , use_mean : bool = True , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' weights (list): a list of weights for rescaling each variable outputted by self.operator use_mean (bool): use mean for evaluating the losses or not (the alternative is sum) Returns: Callable: the closure function used for evaluating the loss value \"\"\" self . data_loss = self . _data_loss l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) # Using mean evaluation or not if use_mean == True : self . loss_evaluator = lambda res : torch . mean ( torch . square (( res ))) else : self . loss_evaluator = lambda res : torch . sum ( torch . square (( res ))) # Relative norm or not if relative == True : if use_mean == True : self . norm_evaluator = lambda ref : torch . mean ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : torch . sum ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : 1 self . data_loss_wrapper = self . _no_data_loss_wrapper def closure (): output_tilde = self . operator . forward ( ** input_data ) data_losses = self . data_loss_wrapper ( output_tilde = output_tilde , weights = weights , target_data_tensor = target_data , axis = axis , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = sum ( data_losses ) + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . 
flush () return closure __call__ ( input_data = None , target_data = None , call_back = '' , lambda_1 = 0.0 , lambda_2 = 0.0 , axis =- 1 , relative = False , device = 'cpu' , weights = None , use_mean = True ) # Main function for generating complete loss function workflow Parameters: Name Type Description Default input_data Union [ dict , Tensor ] the data used as input for self.operator None target_data Tensor the target data used for training self.oeprator None call_back str a string used for composing the logging of the optimization process '' norm_value list a list of values used for normalizing the loss temrms required lambda_1 float the penalty for the L^1 regularization term 0.0 lambda_2 float the penalty for the L^2 regularization term 0.0 device str the device in which the loss evaluation will be executed, 'cpu' or 'gpu' 'cpu' weights list a list of weights for rescaling each variable outputted by self.operator None use_mean bool use mean for evaluating the losses or not (the alternative is sum) True Returns: Name Type Description Callable Callable the closure function used for evaluating the loss Callable value Source code in simulai/optimization/_losses.py 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . Tensor = None , call_back : str = \"\" , lambda_1 : float = 0.0 , lambda_2 : float = 0.0 , axis : int = - 1 , relative : bool = False , device : str = \"cpu\" , weights : list = None , use_mean : bool = True , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' weights (list): a list of weights for rescaling each variable outputted by self.operator use_mean (bool): use mean for evaluating the losses or not (the alternative is sum) Returns: Callable: the closure function used for evaluating the loss value \"\"\" self . data_loss = self . _data_loss l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) # Using mean evaluation or not if use_mean == True : self . loss_evaluator = lambda res : torch . mean ( torch . square (( res ))) else : self . loss_evaluator = lambda res : torch . sum ( torch . square (( res ))) # Relative norm or not if relative == True : if use_mean == True : self . norm_evaluator = lambda ref : torch . mean ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : torch . sum ( torch . 
square (( ref ))) else : self . norm_evaluator = lambda ref : 1 self . data_loss_wrapper = self . _no_data_loss_wrapper def closure (): output_tilde = self . operator . forward ( ** input_data ) data_losses = self . data_loss_wrapper ( output_tilde = output_tilde , weights = weights , target_data_tensor = target_data , axis = axis , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = sum ( data_losses ) + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . flush () return closure __init__ ( operator = None ) # Weighted mean-squared error loss function Parameters: Name Type Description Default operator Module the operator used for evaluating the loss function (usually a neural network) None Source code in simulai/optimization/_losses.py 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 def __init__ ( self , operator = None ): \"\"\"Weighted mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . split_dim = 1 self . tol = 1e-25 self . loss_evaluator = None self . norm_evaluator = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . loss_states = { \"loss\" : list ()} PIRMSELoss # Bases: LossBasics Source code in simulai/optimization/_losses.py 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 
887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 class PIRMSELoss ( LossBasics ): def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Physics-Informed mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . split_dim = 1 self . operator = operator self . loss_evaluator = None self . residual = None self . tol = 1e-15 self . device = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . loss_states = { \"pde\" : list (), \"init\" : list (), \"bound\" : list (), \"extra_data\" : list (), } self . loss_tags = list ( self . loss_states . keys ()) self . hybrid_data_pinn = False self . losses_terms_indices = { \"pde\" : 0 , \"init\" : 1 , \"bound\" : 2 , \"extra_data\" : 3 , \"causality_weights\" : 4 , } def _convert ( self , input_data : Union [ dict , np . ndarray ] = None , device : str = None ) -> Union [ dict , torch . Tensor ]: \"\"\"It converts a dataset to the proper format (torch.Tensor) and send it to the chosen execution device ('gpu' or 'cpu') Args: input_data (Union[dict, np.ndarray]): the data structure to be converted device: the device in which the converted dataset must be placed Returns: Union[dict, torch.Tensor]: the converted data structure \"\"\" if type ( input_data ) == dict : return { key : torch . from_numpy ( item . astype ( ARRAY_DTYPE )) . to ( device ) for key , item in input_data . items () } else : return torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) . to ( device ) def _to_tensor ( self , * args , device : str = \"cpu\" ) -> List [ torch . Tensor ]: \"\"\"It converted a size indefined list of arrays to tensors Args: *args: list of arrays to be converted input_data (Union[dict, np.ndarray]) device: the device in which the converted dataset must be placed :type np.array, np.array, ..., np.array Returns: List[torch.Tensor]: a list of tensors \"\"\" return [ self . _convert ( input_data = arg , device = device ) for arg in args ] def _data_loss ( self , output_tilde : torch . Tensor = None , target_data_tensor : torch . Tensor = None , weights : List [ float ] = None , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value \"\"\" output_split = torch . split ( output_tilde , self . split_dim , dim =- 1 ) target_split = torch . split ( target_data_tensor , self . split_dim , dim =- 1 ) data_losses = [ self . loss_evaluator_data (( out_split , tgt_split )) / ( self . norm_evaluator ( tgt_split ) or torch . tensor ( 1.0 ) . to ( self . device )) for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] return self . 
weighted_loss_evaluator ( data_losses , weights ) def _data_loss_adaptive ( self , output_tilde : torch . Tensor = None , target_data_tensor : torch . Tensor = None , ** kwargs , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value \"\"\" output_split = torch . split ( output_tilde , self . split_dim , dim =- 1 ) target_split = torch . split ( target_data_tensor , self . split_dim , dim =- 1 ) data_discrepancy = [ out_split - tgt_split for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] weights = self . data_weights_estimator ( residual = data_discrepancy , loss_evaluator = self . loss_evaluator , loss_history = self . loss_states , operator = self . operator , ) data_losses = [ weights [ i ] * self . loss_evaluator_data (( out_split , tgt_split )) for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] return [ sum ( data_losses )] def _global_weights_bypass ( self , initial_penalty : float = None , ** kwargs ) -> List [ float ]: return [ 1.0 , initial_penalty , 1.0 , 1.0 ] def _global_weights_estimator ( self , ** kwargs ) -> List [ float ]: weights = self . global_weights_estimator ( ** kwargs ) return weights def _residual_loss ( self , residual_approximation : List [ torch . Tensor ] = None , weights : list = None ) -> List [ torch . Tensor ]: \"\"\"It evaluates the physics-driven residual loss Args: residual_approximation (List[torch.Tensor]): a list of tensors containing the evaluation for the physical residual for each sample in the dataset weights (list): a list of weights used for rescaling the residuals of each variable Returns: torch.Tensor: the list of residual losses \"\"\" residual_losses = [ self . loss_evaluator ( res ) for res in residual_approximation ] return self . weighted_loss_evaluator ( residual_losses , weights ) def _residual_loss_adaptive ( self , residual_approximation : List [ torch . Tensor ] = None , weights : list = None ) -> List [ torch . Tensor ]: \"\"\"It evaluates the physics-driven residual loss Args: residual_approximation (List[torch.Tensor]): a list of tensors containing the evaluation for the physical residual for each sample in the dataset weights (list): a list of weights used for rescaling the residuals of each variable Returns: torch.Tensor: the list of residual losses \"\"\" weights = self . residual_weights_estimator ( residual = residual_approximation , loss_evaluator = self . loss_evaluator , loss_history = self . loss_states , operator = self . operator , ) residual_loss = [ weight * self . loss_evaluator ( res ) for weight , res in zip ( weights , residual_approximation ) ] return [ sum ( residual_loss )] def _extra_data ( self , input_data : torch . Tensor = None , target_data : torch . Tensor = None ) -> torch . Tensor : # Evaluating data for the initial condition output_tilde = self . operator ( input_data = input_data ) # Evaluating loss approximation for extra data data_loss = self . _data_loss ( output_tilde = output_tilde , target_data_tensor = target_data ) return data_loss def _boundary_penalisation ( self , boundary_input : dict = None , residual : SymbolicOperator = None ) -> List [ torch . 
Tensor ]: \"\"\"It applies the boundary conditions Args: boundary_input (dict): a dictionary containing the coordinates of the boundaries residual (SymbolicOperator): a symbolic expression for the boundary condition Returns: list: the evaluation of each boundary condition \"\"\" return [ residual . eval_expression ( k , boundary_input [ k ]) for k in boundary_input . keys () ] def _no_boundary_penalisation ( self , boundary_input : dict = None , residual : object = None ) -> List [ torch . Tensor ]: \"\"\"It is used for cases in which no boundary condition is applied \"\"\" return [ torch . Tensor ([ 0.0 ]) . to ( self . device ) for k in boundary_input . keys ()] def _no_boundary ( self , boundary_input : dict = None , residual : object = None ) -> List [ torch . Tensor ]: \"\"\"It is used for cases where there are not boundaries \"\"\" return torch . Tensor ([ 0.0 ]) . to ( self . device ) def _no_extra_data ( self , input_data : torch . Tensor = None , target_data : torch . Tensor = None ) -> torch . Tensor : return torch . Tensor ([ 0.0 ]) . to ( self . device ) def _no_residual_wrapper ( self , input_data : torch . Tensor = None ) -> torch . Tensor : return self . residual ( input_data ) def _causality_preserving_residual_wrapper ( self , input_data : torch . Tensor = None ) -> List : return self . causality_preserving ( self . residual ( input_data )) def _filter_necessary_loss_terms ( self , residual : SymbolicOperator = None ): tags = [ \"pde\" , \"init\" ] indices = [ 0 , 1 ] if residual . g_expressions : tags . append ( \"bound\" ) indices . append ( 2 ) else : pass if self . hybrid_data_pinn : tags . append ( \"extra_data\" ) indices . append ( 3 ) else : pass return tags , indices def _losses_states_str ( self , tags : List [ str ] = None ): losses_str = \" \\r \" for item in tags : losses_str += f \" { item } : {{}} \" return losses_str def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : Union [ dict , torch . Tensor ] = None , verbose : bool = False , call_back : str = \"\" , residual : Callable = None , initial_input : Union [ dict , torch . Tensor ] = None , initial_state : Union [ dict , torch . Tensor ] = None , boundary_input : dict = None , boundary_penalties : list = [ 1 ], extra_input_data : Union [ dict , torch . Tensor ] = None , extra_target_data : Union [ dict , torch . Tensor ] = None , initial_penalty : float = 1 , axis : int = - 1 , relative : bool = False , lambda_1 : float = 0.0 , lambda_2 : float = 0.0 , weights = None , weights_residual = None , device : str = \"cpu\" , split_losses : bool = False , causality_preserving : Callable = None , global_weights_estimator : Callable = None , residual_weights_estimator : Callable = None , data_weights_estimator : Callable = None , use_mean : bool = True , use_data_log : bool = False , ) -> Callable : self . residual = residual self . device = device self . causality_preserving = causality_preserving # Handling expection when AnnealingWeights and split_losses # are used together. if isinstance ( global_weights_estimator , AnnealingWeights ): if split_losses : raise RuntimeError ( \"Global weights estimator, AnnealingWeights, is not\" + \"compatible with split loss terms.\" ) else : pass self . global_weights_estimator = global_weights_estimator self . residual_weights_estimator = residual_weights_estimator self . data_weights_estimator = data_weights_estimator if split_losses : self . weighted_loss_evaluator = self . _bypass_weighted_loss else : self . 
weighted_loss_evaluator = self . _eval_weighted_loss if ( isinstance ( extra_input_data , np . ndarray ) == isinstance ( extra_target_data , np . ndarray ) == True ): self . hybrid_data_pinn = True else : pass # When no weight is provided, they are # set to the default choice if weights is None : weights = len ( residual . output_names ) * [ 1 ] if weights_residual is None : weights_residual = len ( residual . output_names ) * [ 1 ] loss_tags , loss_indices = self . _filter_necessary_loss_terms ( residual = residual ) loss_str = self . _losses_states_str ( tags = loss_tags ) # Boundary conditions are optional, since they are not # defined in some cases, as ODE, for example. if residual . g_expressions : boundary = self . _boundary_penalisation else : if boundary_input == None : boundary = self . _no_boundary else : boundary = self . _no_boundary_penalisation if self . causality_preserving : call_back = f \", causality_weights: { self . causality_preserving . call_back } \" self . residual_wrapper = self . _causality_preserving_residual_wrapper else : self . residual_wrapper = self . _no_residual_wrapper l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) if type ( input_data ) is dict : try : input_data = input_data [ \"input_data\" ] except Exception : pass initial_input , initial_state = self . _to_tensor ( initial_input , initial_state , device = device ) # Preparing extra data, when necessary if self . hybrid_data_pinn : extra_input_data , extra_target_data = self . _to_tensor ( extra_input_data , extra_target_data , device = device ) self . extra_data = self . _extra_data else : self . extra_data = self . _no_extra_data if use_data_log == True : self . inner_square = self . _two_term_log_loss else : self . inner_square = self . _two_term_loss if use_mean == True : self . loss_evaluator = lambda res : torch . mean ( self . _single_term_loss ( res )) else : self . loss_evaluator = lambda res : torch . sum ( self . _single_term_loss ( res )) if use_mean == True : self . loss_evaluator_data = lambda res : torch . mean ( self . inner_square ( * res )) else : self . loss_evaluator_data = lambda res : torch . sum ( self . inner_square ( * res )) # Relative norm or not if relative == True : if use_mean == True : self . norm_evaluator = lambda ref : torch . mean ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : torch . sum ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : 1 # Determing the usage of special residual loss weighting if residual_weights_estimator : self . residual_loss = self . _residual_loss_adaptive else : self . residual_loss = self . _residual_loss # Determing the usage of special data loss weighting if data_weights_estimator : self . data_loss = self . _data_loss_adaptive else : self . data_loss = self . _data_loss # Determining the usage of special global loss weighting if global_weights_estimator : self . global_weights = self . _global_weights_estimator else : self . global_weights = self . _global_weights_bypass if verbose : self . pprint = self . _pprint_verbose else : self . pprint = self . _pprint_simple def closure (): # Executing the symbolic residual evaluation residual_approximation = self . 
residual_wrapper ( input_data ) # Boundary, if appliable boundary_approximation = boundary ( boundary_input = boundary_input , residual = residual ) # Evaluating data for the initial condition initial_output_tilde = self . operator ( input_data = initial_input ) # Evaluating loss function for residual residual_loss = self . residual_loss ( residual_approximation = residual_approximation , weights = weights_residual ) # Evaluating loss for the boundary approaximation, if appliable boundary_loss = self . _residual_loss ( residual_approximation = boundary_approximation , weights = boundary_penalties , ) # Evaluating loss approximation for initial condition initial_data_loss = self . data_loss ( output_tilde = initial_output_tilde , target_data_tensor = initial_state , weights = weights , ) # Evaluating extra data loss, when appliable extra_data = self . extra_data ( input_data = extra_input_data , target_data = extra_target_data ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # The complete loss function pde = residual_loss init = initial_data_loss bound = boundary_loss loss_terms = self . _aggregate_terms ( * pde , * init , * bound , * extra_data ) # Updating the loss weights if necessary loss_weights = self . global_weights ( initial_penalty = initial_penalty , operator = self . operator , loss_evaluator = self . loss_evaluator , residual = loss_terms , ) # Overall loss function loss = ( sum ( self . _eval_weighted_loss ( loss_terms , loss_weights )) + l2_reg + l1_reg ) # Back-propagation loss . backward () pde_detach = float ( sum ( pde ) . detach () . data ) init_detach = float ( sum ( init ) . detach () . data ) bound_detach = float ( sum ( bound ) . detach () . data ) extra_data_detach = float ( sum ( extra_data ) . detach () . data ) self . loss_states [ \"pde\" ] . append ( pde_detach ) self . loss_states [ \"init\" ] . append ( init_detach ) self . loss_states [ \"bound\" ] . append ( bound_detach ) self . loss_states [ \"extra_data\" ] . append ( extra_data_detach ) losses_list = np . array ( [ pde_detach , init_detach , bound_detach , extra_data_detach ] ) self . pprint ( loss_str = loss_str , losses_list = losses_list , call_back = call_back , loss_indices = loss_indices , loss_terms = loss_terms , loss_weights = loss_weights , ) _current_loss = loss return _current_loss return closure __init__ ( operator = None ) # Physics-Informed mean-squared error loss function Parameters: Name Type Description Default operator Module the operator used for evaluating the loss function (usually a neural network) None Source code in simulai/optimization/_losses.py 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Physics-Informed mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . split_dim = 1 self . operator = operator self . loss_evaluator = None self . residual = None self . tol = 1e-15 self . device = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . 
loss_states = { \"pde\" : list (), \"init\" : list (), \"bound\" : list (), \"extra_data\" : list (), } self . loss_tags = list ( self . loss_states . keys ()) self . hybrid_data_pinn = False self . losses_terms_indices = { \"pde\" : 0 , \"init\" : 1 , \"bound\" : 2 , \"extra_data\" : 3 , \"causality_weights\" : 4 , }","title":"Loss Functions"},{"location":"simulai_optimization/simulai_losses/#loss-functions","text":"","title":"Loss Functions"},{"location":"simulai_optimization/simulai_losses/#rmseloss","text":"Bases: LossBasics Source code in simulai/optimization/_losses.py 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 class RMSELoss ( LossBasics ): def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Vanilla mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . loss_states = { \"loss\" : list ()} def _data_loss ( self , output_tilde : torch . Tensor = None , norm_value : torch . Tensor = None , target_data_tensor : torch . Tensor = None , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator norm_value (torch.Tensor): the value used for normalizing the loss evaluation target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value for a given state \"\"\" if norm_value is not None : data_loss = torch . mean ( torch . square (( output_tilde - target_data_tensor ) / norm_value ) ) else : data_loss = torch . mean ( torch . square (( output_tilde - target_data_tensor ))) return data_loss def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . Tensor = None , call_back : str = \"\" , norm_value : list = None , lambda_1 : float = 0.0 , device : str = \"cpu\" , lambda_2 : float = 0.0 , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' Returns: Callable: the closure function used for evaluating the loss value \"\"\" l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) def closure (): output_tilde = self . operator . forward ( ** input_data ) data_loss = self . 
_data_loss ( output_tilde = output_tilde , norm_value = norm_value , target_data_tensor = target_data , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = data_loss + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . flush () return loss return closure","title":"RMSELoss"},{"location":"simulai_optimization/simulai_losses/#simulai.optimization.RMSELoss.__call__","text":"Main function for generating complete loss function workflow Parameters: Name Type Description Default input_data Union [ dict , Tensor ] the data used as input for self.operator None target_data Tensor the target data used for training self.oeprator None call_back str a string used for composing the logging of the optimization process '' norm_value list a list of values used for normalizing the loss temrms None lambda_1 float the penalty for the L^1 regularization term 0.0 lambda_2 float the penalty for the L^2 regularization term 0.0 device str the device in which the loss evaluation will be executed, 'cpu' or 'gpu' 'cpu' Returns: Name Type Description Callable Callable the closure function used for evaluating the loss Callable value Source code in simulai/optimization/_losses.py 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . Tensor = None , call_back : str = \"\" , norm_value : list = None , lambda_1 : float = 0.0 , device : str = \"cpu\" , lambda_2 : float = 0.0 , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' Returns: Callable: the closure function used for evaluating the loss value \"\"\" l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) def closure (): output_tilde = self . operator . forward ( ** input_data ) data_loss = self . _data_loss ( output_tilde = output_tilde , norm_value = norm_value , target_data_tensor = target_data , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . 
weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = data_loss + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . flush () return loss return closure","title":"__call__()"},{"location":"simulai_optimization/simulai_losses/#simulai.optimization.RMSELoss.__init__","text":"Vanilla mean-squared error loss function Parameters: Name Type Description Default operator Module the operator used for evaluating the loss function (usually a neural network) None Source code in simulai/optimization/_losses.py 164 165 166 167 168 169 170 171 172 173 174 def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Vanilla mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . loss_states = { \"loss\" : list ()}","title":"__init__()"},{"location":"simulai_optimization/simulai_losses/#wrmseloss","text":"Bases: LossBasics Source code in simulai/optimization/_losses.py 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 class WRMSELoss ( LossBasics ): def __init__ ( self , operator = None ): \"\"\"Weighted mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . split_dim = 1 self . tol = 1e-25 self . loss_evaluator = None self . norm_evaluator = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . loss_states = { \"loss\" : list ()} def _data_loss ( self , output_tilde : torch . Tensor = None , weights : list = None , target_data_tensor : torch . Tensor = None , axis : int = - 1 , ) -> List : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator norm_value (torch.Tensor): the value used for normalizing the loss evaluation target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value for a given state \"\"\" output_split = torch . split ( output_tilde , self . split_dim , dim = axis ) target_split = torch . split ( target_data_tensor , self . split_dim , dim = axis ) data_losses = [ weights [ i ] * self . loss_evaluator ( out_split - tgt_split ) / self . 
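For orientation, a minimal, self-contained sketch of how RMSELoss is normally consumed: rather than being instantiated directly, the string "rmse" is passed to Optimizer.fit, which resolves it to this class (see _get_loss in the Optimizer section below) and drives the closure returned by __call__. The DenseNetwork configuration, learning rate and epoch count are illustrative assumptions, not values taken from this page.

```python
# Minimal sketch (illustrative values): fitting a dense network with the
# vanilla mean-squared error loss, resolved from the string "rmse".
import numpy as np

from simulai.optimization import Optimizer
from simulai.regression import DenseNetwork

# Toy dataset: x -> sin(2*pi*x)
x = np.linspace(0, 1, 256)[:, None].astype("float32")
y = np.sin(2 * np.pi * x).astype("float32")

# Assumed DenseNetwork configuration; keyword names follow the usual
# simulai examples and are not documented on this page.
net = DenseNetwork(
    layers_units=[32, 32],
    activations="tanh",
    input_size=1,
    output_size=1,
    name="net",
)

optimizer = Optimizer("adam", params={"lr": 1e-3})

optimizer.fit(
    op=net,
    input_data=x,
    target_data=y,
    n_epochs=500,
    loss="rmse",  # resolved to RMSELoss
    params={"lambda_1": 0.0, "lambda_2": 1e-5},  # L1/L2 penalties from __call__
)
```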
norm_evaluator ( tgt_split ) for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] return data_losses def _no_data_loss_wrapper ( self , output_tilde : torch . Tensor = None , weights : list = None , target_data_tensor : torch . Tensor = None , axis : int = - 1 , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error without considering causality preserving Args: output_tilde (torch.Tensor): the output generated by self.operator weights (list): weights for rescaling each variable outputted by self.operator target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde axis (int): the axis in which the variables are split Returns: torch.Tensor: the loss function value for a given state \"\"\" return self . data_loss ( output_tilde = output_tilde , weights = weights , target_data_tensor = target_data_tensor , axis = axis , ) def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . Tensor = None , call_back : str = \"\" , lambda_1 : float = 0.0 , lambda_2 : float = 0.0 , axis : int = - 1 , relative : bool = False , device : str = \"cpu\" , weights : list = None , use_mean : bool = True , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' weights (list): a list of weights for rescaling each variable outputted by self.operator use_mean (bool): use mean for evaluating the losses or not (the alternative is sum) Returns: Callable: the closure function used for evaluating the loss value \"\"\" self . data_loss = self . _data_loss l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) # Using mean evaluation or not if use_mean == True : self . loss_evaluator = lambda res : torch . mean ( torch . square (( res ))) else : self . loss_evaluator = lambda res : torch . sum ( torch . square (( res ))) # Relative norm or not if relative == True : if use_mean == True : self . norm_evaluator = lambda ref : torch . mean ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : torch . sum ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : 1 self . data_loss_wrapper = self . _no_data_loss_wrapper def closure (): output_tilde = self . operator . forward ( ** input_data ) data_losses = self . data_loss_wrapper ( output_tilde = output_tilde , weights = weights , target_data_tensor = target_data , axis = axis , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . 
weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = sum ( data_losses ) + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . flush () return closure","title":"WRMSELoss"},{"location":"simulai_optimization/simulai_losses/#simulai.optimization.WRMSELoss.__call__","text":"Main function for generating complete loss function workflow Parameters: Name Type Description Default input_data Union [ dict , Tensor ] the data used as input for self.operator None target_data Tensor the target data used for training self.oeprator None call_back str a string used for composing the logging of the optimization process '' norm_value list a list of values used for normalizing the loss temrms required lambda_1 float the penalty for the L^1 regularization term 0.0 lambda_2 float the penalty for the L^2 regularization term 0.0 device str the device in which the loss evaluation will be executed, 'cpu' or 'gpu' 'cpu' weights list a list of weights for rescaling each variable outputted by self.operator None use_mean bool use mean for evaluating the losses or not (the alternative is sum) True Returns: Name Type Description Callable Callable the closure function used for evaluating the loss Callable value Source code in simulai/optimization/_losses.py 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : torch . Tensor = None , call_back : str = \"\" , lambda_1 : float = 0.0 , lambda_2 : float = 0.0 , axis : int = - 1 , relative : bool = False , device : str = \"cpu\" , weights : list = None , use_mean : bool = True , ) -> Callable : \"\"\"Main function for generating complete loss function workflow Args: input_data (Union[dict, torch.Tensor]): the data used as input for self.operator target_data (torch.Tensor): the target data used for training self.oeprator call_back (str): a string used for composing the logging of the optimization process norm_value (list): a list of values used for normalizing the loss temrms lambda_1 (float): the penalty for the L^1 regularization term lambda_2 (float): the penalty for the L^2 regularization term device (str): the device in which the loss evaluation will be executed, 'cpu' or 'gpu' weights (list): a list of weights for rescaling each variable outputted by self.operator use_mean (bool): use mean for evaluating the losses or not (the alternative is sum) Returns: Callable: the closure function used for evaluating the loss value \"\"\" self . data_loss = self . _data_loss l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . 
weights_l2 ) ) # Using mean evaluation or not if use_mean == True : self . loss_evaluator = lambda res : torch . mean ( torch . square (( res ))) else : self . loss_evaluator = lambda res : torch . sum ( torch . square (( res ))) # Relative norm or not if relative == True : if use_mean == True : self . norm_evaluator = lambda ref : torch . mean ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : torch . sum ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : 1 self . data_loss_wrapper = self . _no_data_loss_wrapper def closure (): output_tilde = self . operator . forward ( ** input_data ) data_losses = self . data_loss_wrapper ( output_tilde = output_tilde , weights = weights , target_data_tensor = target_data , axis = axis , ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # Loss = ||\u0168_t - U_t||_2 + # lambda_1 *||W||_2 + lambda_2 * ||W||_1 loss = sum ( data_losses ) + l2_reg + l1_reg # Back-propagation loss . backward () self . loss_states [ \"loss\" ] . append ( float ( loss . detach () . data )) sys . stdout . write (( \" \\r loss: {} {} \" ) . format ( loss , call_back )) sys . stdout . flush () return closure","title":"__call__()"},{"location":"simulai_optimization/simulai_losses/#simulai.optimization.WRMSELoss.__init__","text":"Weighted mean-squared error loss function Parameters: Name Type Description Default operator Module the operator used for evaluating the loss function (usually a neural network) None Source code in simulai/optimization/_losses.py 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 def __init__ ( self , operator = None ): \"\"\"Weighted mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . operator = operator self . split_dim = 1 self . tol = 1e-25 self . loss_evaluator = None self . norm_evaluator = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . 
loss_states = { \"loss\" : list ()}","title":"__init__()"},{"location":"simulai_optimization/simulai_losses/#pirmseloss","text":"Bases: LossBasics Source code in simulai/optimization/_losses.py 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 class PIRMSELoss ( LossBasics ): def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Physics-Informed mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . split_dim = 1 self . operator = operator self . loss_evaluator = None self . residual = None self . tol = 1e-15 self . device = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . loss_states = { \"pde\" : list (), \"init\" : list (), \"bound\" : list (), \"extra_data\" : list (), } self . loss_tags = list ( self . loss_states . keys ()) self . hybrid_data_pinn = False self . losses_terms_indices = { \"pde\" : 0 , \"init\" : 1 , \"bound\" : 2 , \"extra_data\" : 3 , \"causality_weights\" : 4 , } def _convert ( self , input_data : Union [ dict , np . ndarray ] = None , device : str = None ) -> Union [ dict , torch . 
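WRMSELoss adds per-variable weights and an optional relative normalization on top of the same workflow. A minimal sketch follows, reusing the illustrative DenseNetwork setup assumed above; the weights, relative and use_mean entries map directly onto the __call__ arguments documented here.

```python
# Minimal sketch (illustrative values): weighted MSE with one weight per
# output variable and relative normalization, resolved from "wrmse".
import numpy as np

from simulai.optimization import Optimizer
from simulai.regression import DenseNetwork

x = np.linspace(0, 1, 256)[:, None].astype("float32")
# Two output variables with very different magnitudes
y = np.hstack([np.sin(2 * np.pi * x), 100.0 * np.cos(2 * np.pi * x)]).astype("float32")

net = DenseNetwork(
    layers_units=[32, 32],
    activations="tanh",
    input_size=1,
    output_size=2,
    name="net",
)

optimizer = Optimizer("adam", params={"lr": 1e-3})

optimizer.fit(
    op=net,
    input_data=x,
    target_data=y,
    n_epochs=500,
    loss="wrmse",  # resolved to WRMSELoss
    params={
        "lambda_1": 0.0,
        "lambda_2": 0.0,
        "weights": [1.0, 1.0],  # one weight per output variable
        "relative": True,       # divide each term by the target norm
        "use_mean": True,       # mean (rather than sum) reduction
    },
)
```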
Tensor ]: \"\"\"It converts a dataset to the proper format (torch.Tensor) and send it to the chosen execution device ('gpu' or 'cpu') Args: input_data (Union[dict, np.ndarray]): the data structure to be converted device: the device in which the converted dataset must be placed Returns: Union[dict, torch.Tensor]: the converted data structure \"\"\" if type ( input_data ) == dict : return { key : torch . from_numpy ( item . astype ( ARRAY_DTYPE )) . to ( device ) for key , item in input_data . items () } else : return torch . from_numpy ( input_data . astype ( ARRAY_DTYPE )) . to ( device ) def _to_tensor ( self , * args , device : str = \"cpu\" ) -> List [ torch . Tensor ]: \"\"\"It converted a size indefined list of arrays to tensors Args: *args: list of arrays to be converted input_data (Union[dict, np.ndarray]) device: the device in which the converted dataset must be placed :type np.array, np.array, ..., np.array Returns: List[torch.Tensor]: a list of tensors \"\"\" return [ self . _convert ( input_data = arg , device = device ) for arg in args ] def _data_loss ( self , output_tilde : torch . Tensor = None , target_data_tensor : torch . Tensor = None , weights : List [ float ] = None , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value \"\"\" output_split = torch . split ( output_tilde , self . split_dim , dim =- 1 ) target_split = torch . split ( target_data_tensor , self . split_dim , dim =- 1 ) data_losses = [ self . loss_evaluator_data (( out_split , tgt_split )) / ( self . norm_evaluator ( tgt_split ) or torch . tensor ( 1.0 ) . to ( self . device )) for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] return self . weighted_loss_evaluator ( data_losses , weights ) def _data_loss_adaptive ( self , output_tilde : torch . Tensor = None , target_data_tensor : torch . Tensor = None , ** kwargs , ) -> torch . Tensor : \"\"\"It executes the evaluation of the data-driven mean-squared error Args: output_tilde (torch.Tensor): the output generated by self.operator target_data_tensor (torch.Tensor): the target tensor to be compared with output_tilde Returns: torch.Tensor: the loss function value \"\"\" output_split = torch . split ( output_tilde , self . split_dim , dim =- 1 ) target_split = torch . split ( target_data_tensor , self . split_dim , dim =- 1 ) data_discrepancy = [ out_split - tgt_split for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] weights = self . data_weights_estimator ( residual = data_discrepancy , loss_evaluator = self . loss_evaluator , loss_history = self . loss_states , operator = self . operator , ) data_losses = [ weights [ i ] * self . loss_evaluator_data (( out_split , tgt_split )) for i , ( out_split , tgt_split ) in enumerate ( zip ( output_split , target_split )) ] return [ sum ( data_losses )] def _global_weights_bypass ( self , initial_penalty : float = None , ** kwargs ) -> List [ float ]: return [ 1.0 , initial_penalty , 1.0 , 1.0 ] def _global_weights_estimator ( self , ** kwargs ) -> List [ float ]: weights = self . global_weights_estimator ( ** kwargs ) return weights def _residual_loss ( self , residual_approximation : List [ torch . Tensor ] = None , weights : list = None ) -> List [ torch . 
Tensor ]: \"\"\"It evaluates the physics-driven residual loss Args: residual_approximation (List[torch.Tensor]): a list of tensors containing the evaluation for the physical residual for each sample in the dataset weights (list): a list of weights used for rescaling the residuals of each variable Returns: torch.Tensor: the list of residual losses \"\"\" residual_losses = [ self . loss_evaluator ( res ) for res in residual_approximation ] return self . weighted_loss_evaluator ( residual_losses , weights ) def _residual_loss_adaptive ( self , residual_approximation : List [ torch . Tensor ] = None , weights : list = None ) -> List [ torch . Tensor ]: \"\"\"It evaluates the physics-driven residual loss Args: residual_approximation (List[torch.Tensor]): a list of tensors containing the evaluation for the physical residual for each sample in the dataset weights (list): a list of weights used for rescaling the residuals of each variable Returns: torch.Tensor: the list of residual losses \"\"\" weights = self . residual_weights_estimator ( residual = residual_approximation , loss_evaluator = self . loss_evaluator , loss_history = self . loss_states , operator = self . operator , ) residual_loss = [ weight * self . loss_evaluator ( res ) for weight , res in zip ( weights , residual_approximation ) ] return [ sum ( residual_loss )] def _extra_data ( self , input_data : torch . Tensor = None , target_data : torch . Tensor = None ) -> torch . Tensor : # Evaluating data for the initial condition output_tilde = self . operator ( input_data = input_data ) # Evaluating loss approximation for extra data data_loss = self . _data_loss ( output_tilde = output_tilde , target_data_tensor = target_data ) return data_loss def _boundary_penalisation ( self , boundary_input : dict = None , residual : SymbolicOperator = None ) -> List [ torch . Tensor ]: \"\"\"It applies the boundary conditions Args: boundary_input (dict): a dictionary containing the coordinates of the boundaries residual (SymbolicOperator): a symbolic expression for the boundary condition Returns: list: the evaluation of each boundary condition \"\"\" return [ residual . eval_expression ( k , boundary_input [ k ]) for k in boundary_input . keys () ] def _no_boundary_penalisation ( self , boundary_input : dict = None , residual : object = None ) -> List [ torch . Tensor ]: \"\"\"It is used for cases in which no boundary condition is applied \"\"\" return [ torch . Tensor ([ 0.0 ]) . to ( self . device ) for k in boundary_input . keys ()] def _no_boundary ( self , boundary_input : dict = None , residual : object = None ) -> List [ torch . Tensor ]: \"\"\"It is used for cases where there are not boundaries \"\"\" return torch . Tensor ([ 0.0 ]) . to ( self . device ) def _no_extra_data ( self , input_data : torch . Tensor = None , target_data : torch . Tensor = None ) -> torch . Tensor : return torch . Tensor ([ 0.0 ]) . to ( self . device ) def _no_residual_wrapper ( self , input_data : torch . Tensor = None ) -> torch . Tensor : return self . residual ( input_data ) def _causality_preserving_residual_wrapper ( self , input_data : torch . Tensor = None ) -> List : return self . causality_preserving ( self . residual ( input_data )) def _filter_necessary_loss_terms ( self , residual : SymbolicOperator = None ): tags = [ \"pde\" , \"init\" ] indices = [ 0 , 1 ] if residual . g_expressions : tags . append ( \"bound\" ) indices . append ( 2 ) else : pass if self . hybrid_data_pinn : tags . append ( \"extra_data\" ) indices . 
append ( 3 ) else : pass return tags , indices def _losses_states_str ( self , tags : List [ str ] = None ): losses_str = \" \\r \" for item in tags : losses_str += f \" { item } : {{}} \" return losses_str def __call__ ( self , input_data : Union [ dict , torch . Tensor ] = None , target_data : Union [ dict , torch . Tensor ] = None , verbose : bool = False , call_back : str = \"\" , residual : Callable = None , initial_input : Union [ dict , torch . Tensor ] = None , initial_state : Union [ dict , torch . Tensor ] = None , boundary_input : dict = None , boundary_penalties : list = [ 1 ], extra_input_data : Union [ dict , torch . Tensor ] = None , extra_target_data : Union [ dict , torch . Tensor ] = None , initial_penalty : float = 1 , axis : int = - 1 , relative : bool = False , lambda_1 : float = 0.0 , lambda_2 : float = 0.0 , weights = None , weights_residual = None , device : str = \"cpu\" , split_losses : bool = False , causality_preserving : Callable = None , global_weights_estimator : Callable = None , residual_weights_estimator : Callable = None , data_weights_estimator : Callable = None , use_mean : bool = True , use_data_log : bool = False , ) -> Callable : self . residual = residual self . device = device self . causality_preserving = causality_preserving # Handling expection when AnnealingWeights and split_losses # are used together. if isinstance ( global_weights_estimator , AnnealingWeights ): if split_losses : raise RuntimeError ( \"Global weights estimator, AnnealingWeights, is not\" + \"compatible with split loss terms.\" ) else : pass self . global_weights_estimator = global_weights_estimator self . residual_weights_estimator = residual_weights_estimator self . data_weights_estimator = data_weights_estimator if split_losses : self . weighted_loss_evaluator = self . _bypass_weighted_loss else : self . weighted_loss_evaluator = self . _eval_weighted_loss if ( isinstance ( extra_input_data , np . ndarray ) == isinstance ( extra_target_data , np . ndarray ) == True ): self . hybrid_data_pinn = True else : pass # When no weight is provided, they are # set to the default choice if weights is None : weights = len ( residual . output_names ) * [ 1 ] if weights_residual is None : weights_residual = len ( residual . output_names ) * [ 1 ] loss_tags , loss_indices = self . _filter_necessary_loss_terms ( residual = residual ) loss_str = self . _losses_states_str ( tags = loss_tags ) # Boundary conditions are optional, since they are not # defined in some cases, as ODE, for example. if residual . g_expressions : boundary = self . _boundary_penalisation else : if boundary_input == None : boundary = self . _no_boundary else : boundary = self . _no_boundary_penalisation if self . causality_preserving : call_back = f \", causality_weights: { self . causality_preserving . call_back } \" self . residual_wrapper = self . _causality_preserving_residual_wrapper else : self . residual_wrapper = self . _no_residual_wrapper l1_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_1 ), term_type = type ( self . operator . weights_l1 ) ) l2_reg_multiplication = self . _exec_multiplication_in_regularization ( lambda_type = type ( lambda_2 ), term_type = type ( self . operator . weights_l2 ) ) if type ( input_data ) is dict : try : input_data = input_data [ \"input_data\" ] except Exception : pass initial_input , initial_state = self . _to_tensor ( initial_input , initial_state , device = device ) # Preparing extra data, when necessary if self . 
hybrid_data_pinn : extra_input_data , extra_target_data = self . _to_tensor ( extra_input_data , extra_target_data , device = device ) self . extra_data = self . _extra_data else : self . extra_data = self . _no_extra_data if use_data_log == True : self . inner_square = self . _two_term_log_loss else : self . inner_square = self . _two_term_loss if use_mean == True : self . loss_evaluator = lambda res : torch . mean ( self . _single_term_loss ( res )) else : self . loss_evaluator = lambda res : torch . sum ( self . _single_term_loss ( res )) if use_mean == True : self . loss_evaluator_data = lambda res : torch . mean ( self . inner_square ( * res )) else : self . loss_evaluator_data = lambda res : torch . sum ( self . inner_square ( * res )) # Relative norm or not if relative == True : if use_mean == True : self . norm_evaluator = lambda ref : torch . mean ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : torch . sum ( torch . square (( ref ))) else : self . norm_evaluator = lambda ref : 1 # Determing the usage of special residual loss weighting if residual_weights_estimator : self . residual_loss = self . _residual_loss_adaptive else : self . residual_loss = self . _residual_loss # Determing the usage of special data loss weighting if data_weights_estimator : self . data_loss = self . _data_loss_adaptive else : self . data_loss = self . _data_loss # Determining the usage of special global loss weighting if global_weights_estimator : self . global_weights = self . _global_weights_estimator else : self . global_weights = self . _global_weights_bypass if verbose : self . pprint = self . _pprint_verbose else : self . pprint = self . _pprint_simple def closure (): # Executing the symbolic residual evaluation residual_approximation = self . residual_wrapper ( input_data ) # Boundary, if appliable boundary_approximation = boundary ( boundary_input = boundary_input , residual = residual ) # Evaluating data for the initial condition initial_output_tilde = self . operator ( input_data = initial_input ) # Evaluating loss function for residual residual_loss = self . residual_loss ( residual_approximation = residual_approximation , weights = weights_residual ) # Evaluating loss for the boundary approaximation, if appliable boundary_loss = self . _residual_loss ( residual_approximation = boundary_approximation , weights = boundary_penalties , ) # Evaluating loss approximation for initial condition initial_data_loss = self . data_loss ( output_tilde = initial_output_tilde , target_data_tensor = initial_state , weights = weights , ) # Evaluating extra data loss, when appliable extra_data = self . extra_data ( input_data = extra_input_data , target_data = extra_target_data ) # L\u00b2 and L\u00b9 regularization term weights_l2 = self . operator . weights_l2 weights_l1 = self . operator . weights_l1 # beta *||W||_2 + alpha * ||W||_1 l2_reg = l2_reg_multiplication ( lambda_2 , weights_l2 ) l1_reg = l1_reg_multiplication ( lambda_1 , weights_l1 ) # The complete loss function pde = residual_loss init = initial_data_loss bound = boundary_loss loss_terms = self . _aggregate_terms ( * pde , * init , * bound , * extra_data ) # Updating the loss weights if necessary loss_weights = self . global_weights ( initial_penalty = initial_penalty , operator = self . operator , loss_evaluator = self . loss_evaluator , residual = loss_terms , ) # Overall loss function loss = ( sum ( self . _eval_weighted_loss ( loss_terms , loss_weights )) + l2_reg + l1_reg ) # Back-propagation loss . 
backward () pde_detach = float ( sum ( pde ) . detach () . data ) init_detach = float ( sum ( init ) . detach () . data ) bound_detach = float ( sum ( bound ) . detach () . data ) extra_data_detach = float ( sum ( extra_data ) . detach () . data ) self . loss_states [ \"pde\" ] . append ( pde_detach ) self . loss_states [ \"init\" ] . append ( init_detach ) self . loss_states [ \"bound\" ] . append ( bound_detach ) self . loss_states [ \"extra_data\" ] . append ( extra_data_detach ) losses_list = np . array ( [ pde_detach , init_detach , bound_detach , extra_data_detach ] ) self . pprint ( loss_str = loss_str , losses_list = losses_list , call_back = call_back , loss_indices = loss_indices , loss_terms = loss_terms , loss_weights = loss_weights , ) _current_loss = loss return _current_loss return closure","title":"PIRMSELoss"},{"location":"simulai_optimization/simulai_losses/#simulai.optimization.PIRMSELoss.__init__","text":"Physics-Informed mean-squared error loss function Parameters: Name Type Description Default operator Module the operator used for evaluating the loss function (usually a neural network) None Source code in simulai/optimization/_losses.py 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 def __init__ ( self , operator : torch . nn . Module = None ) -> None : \"\"\"Physics-Informed mean-squared error loss function Args: operator (torch.nn.Module): the operator used for evaluating the loss function (usually a neural network) \"\"\" super () . __init__ () self . split_dim = 1 self . operator = operator self . loss_evaluator = None self . residual = None self . tol = 1e-15 self . device = None self . axis_loss_evaluator = lambda res : torch . mean ( torch . square (( res )), dim = 1 ) self . loss_states = { \"pde\" : list (), \"init\" : list (), \"bound\" : list (), \"extra_data\" : list (), } self . loss_tags = list ( self . loss_states . keys ()) self . hybrid_data_pinn = False self . 
losses_terms_indices = { \"pde\" : 0 , \"init\" : 1 , \"bound\" : 2 , \"extra_data\" : 3 , \"causality_weights\" : 4 , }","title":"__init__()"},{"location":"simulai_optimization/simulai_optimizer/","text":"Optimization Interfaces # Optimizer # Source code in simulai/optimization/_optimization.py 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 class Optimizer : def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : 
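PIRMSELoss is selected with loss="pirmse" and, when target_data is None, trains purely against the symbolic residual plus the initial-condition term. Below is a minimal sketch for a single decaying ODE; the SymbolicOperator keyword names and the expression syntax follow the usual simulai examples and should be read as assumptions rather than as part of this page.

```python
# Minimal PINN sketch (illustrative): the decaying ODE du/dt = -omega*u,
# trained with the "pirmse" loss and no observational data.
import numpy as np

from simulai.optimization import Optimizer
from simulai.regression import DenseNetwork
from simulai.residuals import SymbolicOperator

t_train = np.linspace(0, 1, 200)[:, None].astype("float32")

net = DenseNetwork(
    layers_units=[32, 32],
    activations="tanh",
    input_size=1,
    output_size=1,
    name="net",
)

# Residual D(u, t) + omega*u = 0, i.e. du/dt = -omega*u
residual = SymbolicOperator(
    expressions=["D(u, t) + omega*u"],
    input_vars=["t"],
    output_vars=["u"],
    function=net,
    constants={"omega": 2.0},
    engine="torch",
)

optimizer = Optimizer("adam", params={"lr": 1e-3})

optimizer.fit(
    op=net,
    input_data=t_train,
    target_data=None,  # no data loss: residual-driven (physics-informed) training
    n_epochs=2000,
    loss="pirmse",     # resolved to PIRMSELoss
    params={
        "residual": residual,
        "initial_input": np.array([[0.0]], dtype="float32"),
        "initial_state": np.array([[1.0]], dtype="float32"),
        "initial_penalty": 10,      # extra weight on the initial-condition term
        "weights_residual": [1.0],  # per-equation residual weights
    },
    device="cpu",
)
```

When labelled data is also available, the extra_input_data and extra_target_data arguments of __call__ activate the hybrid data/PINN branch (hybrid_data_pinn) and add a data-driven term alongside the residual and initial-condition losses.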
\"\"\" Args: optimizer (str): A name for a PyTorch optimizer. early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . _get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . 
accuracy_str = \"\" self . decay_frequency = None self . loss_states = None self . is_physics_informed = False def _verify_GPU_memory_availability ( self , device : str = None ): total = torch . cuda . get_device_properties ( device ) . total_memory reserved = torch . cuda . memory_reserved ( device ) allocated = torch . cuda . memory_allocated ( device ) return total - reserved - allocated def _try_to_transfer_to_GPU ( self , data : Union [ dict , torch . Tensor ], device : str = None ) -> None : available_GPU_memory = self . _verify_GPU_memory_availability ( device = device ) if isinstance ( data , dict ): data_size = sum ([ t . element_size () * t . nelement () for t in data . values ()]) if data_size < available_GPU_memory : data_ = { k : t . to ( device ) for k , t in data . items ()} print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data elif isinstance ( data , torch . Tensor ): data_size = data . element_size () * data . nelement () if data_size < available_GPU_memory : data_ = data . to ( device ) print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data else : return data def _seek_by_extra_trainable_parameters ( self , residual : SymbolicOperator = None ) -> Union [ list , None ]: if hasattr ( residual , \"constants\" ): extra_parameters = [ c for c in residual . trainable_parameters . values () if isinstance ( c , Parameter ) ] if extra_parameters : print ( \"There are extra trainable parameters.\" ) return extra_parameters else : return None def _get_lr_decay ( self ) -> Union [ callable , None ]: if self . lr_decay_scheduler_params is not None : name = self . lr_decay_scheduler_params . pop ( \"name\" ) self . decay_frequency = self . lr_decay_scheduler_params . pop ( \"decay_frequency\" ) lr_class = getattr ( torch . optim . lr_scheduler , name ) return lr_class else : return None def _exec_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . randperm ( size ) def _summary_writer ( self , loss_states : dict = None , epoch : int = None ) -> None : for k , v in loss_states . items (): loss = v [ epoch ] self . writer . add_scalar ( k , loss , epoch ) # It handles early-stopping for the optimization loop def _early_stopping_handler ( self , val_loss_function : callable = None ) -> None : loss = val_loss_function () self . accuracy_str = \"acc: {} \" . format ( loss ) if loss < self . validation_score : self . validation_score = loss self . awaited_steps = 0 return False elif ( loss > self . validation_score ) and ( self . awaited_steps <= self . early_stopping_params [ \"patience\" ] ): self . validation_score = loss self . awaited_steps += 1 return False else : print ( \"Early-stopping was actioned.\" ) return True def _lr_decay_handler ( self , epoch : int = None ): if ( epoch % self . decay_frequency == 0 ) and ( epoch > 0 ): self . lr_decay_scheduler . step () def _checkpoint_handler ( self , save_dir : str = None , name : str = None , model : NetworkTemplate = None , template : callable = None , compact : bool = False , epoch : int = None , ) -> None : if epoch % self . checkpoint_frequency == 0 : tag = self . overwrite_rule ( epoch ) saver = SPFile ( compact = compact ) saver . 
write ( save_dir = save_dir , name = name + tag , model = model , template = template ) def _no_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . arange ( size ) def _bypass_summary_writer ( self , ** kwargs ) -> None : pass # Doing nothing to early-stopping def _bypass_stop_handler ( self , ** kwargs ): return False # Doing nothing with lr def _bypass_lr_decay_handler ( self , ** kwargs ): pass # Doing nothing to checkpoint def _bypass_checkpoint_handler ( self , ** kwargs ): pass # When data is a NumPy array def _get_vector_data ( self , dataset : Union [ np . ndarray , torch . Tensor ] = None , indices : np . ndarray = None , ) -> torch . Tensor : if dataset is None : return None elif isinstance ( dataset , Dataset ): return dataset ()[ indices ] else : return dataset [ indices ] # When data is stored in a HDF5 dataset def _get_ondisk_data ( self , dataset : callable = None , indices : np . ndarray = None ) -> torch . Tensor : return dataset ( indices = indices ) # Preparing the batches (converting format and moving to the correct device) # in a single batch optimization loop def _make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : item . to ( device ) for key , item in input_data . items ()} else : input_data_dict = { self . input_data_name : input_data . to ( device )} return input_data_dict # Preparing the batches (converting format and moving to the correct device) def _batchwise_make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" , batch_indices : torch . Tensor = None , ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : self . get_data ( dataset = item , indices = batch_indices ) . to ( device ) for key , item in input_data . items () } else : input_data_dict = { self . input_data_name : self . get_data ( dataset = input_data , indices = batch_indices ) . to ( device ) } return input_data_dict # Getting up optimizer from the supported engines def _get_optimizer ( self , optimizer : str = None ) -> torch . nn . Module : try : for optim_module in self . optim_modules : mod_items = dir ( optim_module ) mod_items_lower = [ item . lower () for item in mod_items ] if optimizer in mod_items_lower : print ( f \"Optimizer { optimizer } found in { optim_module . __name__ } .\" ) optimizer_name = mod_items [ mod_items_lower . index ( optimizer )] return getattr ( optim_module , optimizer_name ) else : print ( f \"Optimizer { optimizer } not found in { optim_module . __name__ } .\" ) except : raise Exception ( f \"There is no correspondent to { optimizer } in any known optimization module.\" ) # Getting up loss function from the correspondent module def _get_loss ( self , loss : str = None ) -> callable : if type ( loss ) == str : name = loss . upper () return getattr ( self . losses_module , name + \"Loss\" ) elif callable ( loss ): return loss else : return f \"loss must be str or callable, but received { type ( loss ) } \" # Single batch optimization loop def _optimization_loop ( self , n_epochs : int = None , loss_function : callable = None , op : NetworkTemplate = None , loss_states : dict = None , validation_loss_function : callable = None , ) -> None : for epoch in range ( n_epochs ): self . optimizer_instance . zero_grad () self . optimizer_instance . step ( loss_function ) self . checkpoint_handler ( model = op , epoch = epoch , ** self . checkpoint_params ) self . 
summary_writer ( loss_states = loss_states , epoch = epoch ) self . lr_decay_handler ( epoch = epoch ) self . loss_states = loss_states # Basic version of the mini-batch optimization loop # TODO It could be parallelized def _batchwise_optimization_loop ( self , n_epochs : int = None , batch_size : int = None , loss : Union [ str , type ] = None , op : NetworkTemplate = None , input_data : torch . Tensor = None , target_data : torch . Tensor = None , validation_data : Tuple [ torch . Tensor ] = None , params : dict = None , device : str = \"cpu\" , ) -> None : print ( \"Executing batchwise optimization loop.\" ) if isinstance ( loss , str ): loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) else : assert isinstance ( loss , type ), \"The object provided is not a LossBasics object.\" loss_class = loss try : loss_instance = loss_class ( operator = op ) except : raise Exception ( f \"It was not possible to instantiate the class { loss } .\" ) if validation_data is not None : validation_input_data , validation_target_data = validation_data validation_input_data = self . _make_input_data ( validation_input_data , device = device ) validation_target_data = validation_target_data . to ( device ) val_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : val_loss_function = None batches = np . array_split ( np . arange ( self . n_samples ), int ( self . n_samples / batch_size ) ) # Number of batchwise optimization epochs n_batch_epochs = len ( batches ) epoch = 0 # Outer loop iteration b_epoch = 0 # Total iteration stop_criterion = False # When using mini-batches, it is necessary to # determine the number of iterations for the outer optimization # loop if n_batch_epochs > n_epochs : n_epochs_global = 1 else : n_epochs_global = int ( math . ceil ( n_epochs / n_batch_epochs )) while epoch < n_epochs_global and stop_criterion == False : # For each batch-wise realization it is possible to determine a # new permutation for the samples samples_permutation = self . sampler ( size = self . n_samples ) for ibatch in batches : self . optimizer_instance . zero_grad () indices = samples_permutation [ ibatch ] input_batch = self . _batchwise_make_input_data ( input_data , device = device , batch_indices = indices ) target_batch = self . get_data ( dataset = target_data , indices = indices ) if target_batch is not None : target_batch = target_batch . to ( device ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_batch , target_data = target_batch , call_back = self . accuracy_str , ** params , ) self . optimizer_instance . step ( loss_function ) self . summary_writer ( loss_states = loss_instance . loss_states , epoch = b_epoch ) self . checkpoint_handler ( model = op , epoch = b_epoch , ** self . checkpoint_params ) self . lr_decay_handler ( epoch = b_epoch ) stop_criterion = self . stop_handler ( val_loss_function = val_loss_function ) b_epoch += 1 epoch += 1 if hasattr ( loss_instance , \"loss_states\" ): if all ( [ isinstance ( item , list ) for item in loss_instance . loss_states . values ()] ): self . loss_states = { key : np . hstack ( value ) for key , value in loss_instance . loss_states . items () } else : self . loss_states = loss_instance . loss_states # Main fit method @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . 
Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . _get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . 
environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . 
early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . loss_states , validation_loss_function = validation_loss_function , ) __init__ ( optimizer = None , early_stopping = False , summary_writer = False , shuffle = True , lr_decay_scheduler_params = None , params = None , early_stopping_params = None , checkpoint_params = None ) # Parameters: Name Type Description Default optimizer str A name for a PyTorch optimizer. None early_stopping bool Early-stopping will be used or not. False summary_writer bool Write a Tensorboard run file or not. False shuffle bool Shuffle the dataset or not. True lr_decay_scheduler_params dict The parameters used for defining a learning rate decay scheme. None params dict Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). None early_stopping_params dict Parameters required by the early-stopping scheme. None checkpoint_params dict Parameters for configuring the checkpointing scheme. None Source code in simulai/optimization/_optimization.py 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : \"\"\" Args: optimizer (str): A name for a PyTorch optimizer. early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . 
optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . _get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . accuracy_str = \"\" self . decay_frequency = None self . loss_states = None self . is_physics_informed = False fit ( op = None , input_data = None , target_data = None , validation_data = None , n_epochs = None , loss = 'rmse' , params = None , batch_size = None , device = 'cpu' , distributed = False , use_jit = False ) # Parameters: Name Type Description Default op NetworkTemplate The model which will be trained None input_data Union [ dict , Tensor , ndarray , callable ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray , callable ] The target data for the problem. None validation_data Tuple [ Union [ Tensor , ndarray , callable ]] The validation data used for the problem (if required). None n_epochs int Number of epochs for the optimization process. None loss str A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray 'rmse' params dict Extra parameters required for task-specific problems (as Physics-informed neural networks). None batch_size int The size of the batch used in each optimization epoch None device str The device in which the optimization will run, 'cpu' or 'gpu'. 'cpu' distributed bool Use distributed (multi-node) training or not. False use_jit bool Use PyTorch JIT (Just in time compilation) or not. 
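For orientation, a minimal usage sketch of the `fit` call documented above. The model construction, the array names, and the loss-specific keys passed through `params` are illustrative assumptions, not taken from this page:

```python
import numpy as np
from simulai.optimization import Optimizer    # assumed import path
from simulai.regression import DenseNetwork   # assumed model class and config keys

# Toy dataset: learn y = sin(2*pi*x) on [0, 1] (illustrative only)
X = np.linspace(0, 1, 1000)[:, None].astype("float32")
y = np.sin(2 * np.pi * X).astype("float32")

# Assumed configuration for a simulai DenseNetwork (a NetworkTemplate subclass)
net = DenseNetwork(
    layers_units=[64, 64],
    activations="tanh",
    input_size=1,
    output_size=1,
    name="net",
)

# __init__-time params are forwarded to the chosen torch optimizer (here, Adam's lr)
optimizer = Optimizer("adam", params={"lr": 1e-3})

# fit-time params are forwarded to the loss ("rmse" -> RMSELoss); the lambda_*
# keys below are assumed regularization weights and may differ per loss class
optimizer.fit(
    op=net,
    input_data=X,
    target_data=y,
    n_epochs=2000,
    loss="rmse",
    params={"lambda_1": 0.0, "lambda_2": 0.0},
    batch_size=128,   # enables the mini-batch optimization loop
    device="cpu",
)
```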
False Source code in simulai/optimization/_optimization.py 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . 
_get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . 
size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . loss_states , validation_loss_function = validation_loss_function , ) ScipyInterface # Source code in simulai/optimization/_optimization.py 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 class ScipyInterface : def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). 
jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64 def _stack_and_convert_parameters ( self , parameters : List [ Union [ torch . Tensor , np . ndarray ]] ) -> np . ndarray : \"\"\" It produces a stack of all the model parameters. Args: parameters (List[Union[torch.Tensor, np.ndarray]]): A list containing all the model parameters in their original shapes. Returns: np.ndarray: A stack (single vertical array) of all the model parameters. \"\"\" return np . hstack ( [ param . detach () . numpy () . astype ( np . float64 ) . flatten () for param in parameters . values () ] ) def _update_and_set_parameters ( self , parameters : np . ndarray ) -> None : \"\"\" It updates the parameters with the new values estimated by the optimizer. Args: parameters (np.ndarray): The stack of all the model parameters. \"\"\" operators = [ torch . from_numpy ( parameters [ slice ( * interval )] . reshape ( shape ) . astype ( self . default_dtype ) ) . to ( self . device ) for interval , shape in zip ( self . operators_intervals , self . operators_shapes . values () ) ] for opi , parameter in enumerate ( self . fun . parameters ()): parameter . data . copy_ ( operators [ opi ]) def _exec_kwargs_forward ( self , input_data : dict = None ): \"\"\"It executes the forward pass for the model when it receives more than one input. Args: input_data dict: Data to be passed to the model. \"\"\" return self . fun . forward ( ** input_data ) def _exec_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ): \"\"\"It executes the forward pass for the model. Args: input_data (Union[np.ndarray, torch.Tensor]): Data to be passed to the model. \"\"\" return self . fun . 
forward ( input_data = input_data ) def _fun_num ( self , parameters : np . ndarray ) -> Tuple [ float ]: \"\"\" Args: parameters (np.ndarray): The stacked parameters defined for the model. Returns: Tuple[float]: The loss(es) defined for the optimization process. \"\"\" self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () return loss . detach () . cpu () . numpy () . astype ( np . float64 ) def _fun ( self , parameters : np . ndarray ) -> Tuple [ float , np . ndarray ]: \"\"\" Args: parameters (np.ndarray): The stack of all the trainable parameters for the model. Returns: Tuple[float, np.ndarray]: A tuple containing the value for the loss function and the array of gradients for the model parameters. \"\"\" # Setting the new values for the model parameters self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () grads = [ v . grad . detach () . cpu () . numpy () for v in self . fun . parameters ()] gradients = np . hstack ( [ v . flatten () for v , shape in zip ( grads , list ( self . operators_shapes . values ())) ] ) return loss . detach () . cpu () . numpy () . astype ( np . float64 ), gradients . astype ( np . float64 ) def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . x ) __init__ ( fun = None , optimizer = None , optimizer_config = dict (), loss = None , loss_config = None , device = 'cpu' , jac = None ) # An interface for using SciPy-defined optimization algorithms. Parameters: Name Type Description Default fun NetworkTemplate A model (neural network) to be trained. None optimizer str A name for an optimizar available on SciPy. None optimizer_config dict A configuration dictionary for the chosen optimizer. dict () loss callable A loss function implemented in the form of a Python function or class. None loss_config dict A configuration dictionary for the loss function. None device str The device in which the optimization will be executed ('cpu' or 'gpu'). 'cpu' jac str If necessary, define a method for evaluating the Jacobian available on SciPy. None Raises: Type Description Exception If a not recognized device is defined as 'device'. 
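As a complement to the parameter table above, a minimal sketch of how this interface might be driven. The model `net`, the pairing with `RMSELoss`, and the L-BFGS-B settings are assumptions for illustration; the loss object is only required to behave as described in the table (called as `loss(input_data, target_data, **loss_config)` and returning a closure):

```python
import numpy as np
from simulai.optimization import ScipyInterface, RMSELoss  # assumed import path
from simulai.regression import DenseNetwork                # assumed model class

# Small synthetic dataset (illustrative only)
X = np.linspace(0, 1, 200)[:, None].astype("float32")
y = (X ** 2).astype("float32")

# Assumed DenseNetwork configuration
net = DenseNetwork(layers_units=[32, 32], activations="tanh",
                   input_size=1, output_size=1, name="net")

# Assumed to follow the documented contract: instantiated with operator=net and
# returning a closure when called with (input_data, target_data, **loss_config)
loss = RMSELoss(operator=net)

scipy_opt = ScipyInterface(
    fun=net,
    optimizer="L-BFGS-B",                            # any scipy.optimize.minimize method
    optimizer_config={"options": {"maxiter": 500}},  # forwarded to scipy.optimize.minimize
    loss=loss,
    loss_config={"lambda_1": 0.0, "lambda_2": 0.0},  # assumed loss-specific keys
)
scipy_opt.fit(input_data=X, target_data=y)
```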
Source code in simulai/optimization/_optimization.py 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64 fit ( input_data = None , target_data = None ) # Parameters: Name Type Description Default input_data Union [ dict , Tensor , ndarray ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray ] The target data used for training the model. 
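To make the parameter flattening used by this interface concrete, here is a small standalone sketch of the interval bookkeeping performed in `__init__` (the layer shapes below are arbitrary examples, not taken from a real model):

```python
import numpy as np
from collections import OrderedDict

# Hypothetical parameter shapes, mimicking the entries of a model state_dict
operators_shapes = OrderedDict({"layer_0.weight": [4, 2], "layer_0.bias": [4]})

# Cumulative offsets of each flattened parameter inside the single 1-D vector
intervals = np.cumsum([0] + [np.prod(shape) for shape in operators_shapes.values()])
operators_intervals = [intervals[i:i + 2].tolist() for i in range(len(intervals) - 1)]

print(operators_intervals)  # [[0, 8], [8, 12]]: slices used to rebuild each tensor
```

Each `[start, stop]` pair is later used to slice the optimizer's flat parameter vector and reshape the slice back to the corresponding tensor shape when updating the model.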
None Source code in simulai/optimization/_optimization.py 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . x )","title":"Optimization Interfaces"},{"location":"simulai_optimization/simulai_optimizer/#optimization-interfaces","text":"","title":"Optimization Interfaces"},{"location":"simulai_optimization/simulai_optimizer/#optimizer","text":"Source code in simulai/optimization/_optimization.py 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 
657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 class Optimizer : def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : \"\"\" Args: optimizer (str): A name for a PyTorch optimizer. early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . _get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . 
lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . accuracy_str = \"\" self . decay_frequency = None self . loss_states = None self . is_physics_informed = False def _verify_GPU_memory_availability ( self , device : str = None ): total = torch . cuda . get_device_properties ( device ) . total_memory reserved = torch . cuda . memory_reserved ( device ) allocated = torch . cuda . memory_allocated ( device ) return total - reserved - allocated def _try_to_transfer_to_GPU ( self , data : Union [ dict , torch . Tensor ], device : str = None ) -> None : available_GPU_memory = self . _verify_GPU_memory_availability ( device = device ) if isinstance ( data , dict ): data_size = sum ([ t . element_size () * t . nelement () for t in data . values ()]) if data_size < available_GPU_memory : data_ = { k : t . to ( device ) for k , t in data . items ()} print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data elif isinstance ( data , torch . Tensor ): data_size = data . element_size () * data . nelement () if data_size < available_GPU_memory : data_ = data . to ( device ) print ( \"Data transferred to GPU.\" ) return data_ else : print ( \"It was not possible to move data to GPU: insufficient memory.\" ) print ( f \" { available_GPU_memory } < { data_size } , in bytes\" ) return data else : return data def _seek_by_extra_trainable_parameters ( self , residual : SymbolicOperator = None ) -> Union [ list , None ]: if hasattr ( residual , \"constants\" ): extra_parameters = [ c for c in residual . trainable_parameters . values () if isinstance ( c , Parameter ) ] if extra_parameters : print ( \"There are extra trainable parameters.\" ) return extra_parameters else : return None def _get_lr_decay ( self ) -> Union [ callable , None ]: if self . lr_decay_scheduler_params is not None : name = self . lr_decay_scheduler_params . pop ( \"name\" ) self . decay_frequency = self . lr_decay_scheduler_params . pop ( \"decay_frequency\" ) lr_class = getattr ( torch . optim . lr_scheduler , name ) return lr_class else : return None def _exec_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . 
randperm ( size ) def _summary_writer ( self , loss_states : dict = None , epoch : int = None ) -> None : for k , v in loss_states . items (): loss = v [ epoch ] self . writer . add_scalar ( k , loss , epoch ) # It handles early-stopping for the optimization loop def _early_stopping_handler ( self , val_loss_function : callable = None ) -> None : loss = val_loss_function () self . accuracy_str = \"acc: {} \" . format ( loss ) if loss < self . validation_score : self . validation_score = loss self . awaited_steps = 0 return False elif ( loss > self . validation_score ) and ( self . awaited_steps <= self . early_stopping_params [ \"patience\" ] ): self . validation_score = loss self . awaited_steps += 1 return False else : print ( \"Early-stopping was actioned.\" ) return True def _lr_decay_handler ( self , epoch : int = None ): if ( epoch % self . decay_frequency == 0 ) and ( epoch > 0 ): self . lr_decay_scheduler . step () def _checkpoint_handler ( self , save_dir : str = None , name : str = None , model : NetworkTemplate = None , template : callable = None , compact : bool = False , epoch : int = None , ) -> None : if epoch % self . checkpoint_frequency == 0 : tag = self . overwrite_rule ( epoch ) saver = SPFile ( compact = compact ) saver . write ( save_dir = save_dir , name = name + tag , model = model , template = template ) def _no_shuffling ( self , size : int = None ) -> torch . Tensor : return torch . arange ( size ) def _bypass_summary_writer ( self , ** kwargs ) -> None : pass # Doing nothing to early-stopping def _bypass_stop_handler ( self , ** kwargs ): return False # Doing nothing with lr def _bypass_lr_decay_handler ( self , ** kwargs ): pass # Doing nothing to checkpoint def _bypass_checkpoint_handler ( self , ** kwargs ): pass # When data is a NumPy array def _get_vector_data ( self , dataset : Union [ np . ndarray , torch . Tensor ] = None , indices : np . ndarray = None , ) -> torch . Tensor : if dataset is None : return None elif isinstance ( dataset , Dataset ): return dataset ()[ indices ] else : return dataset [ indices ] # When data is stored in a HDF5 dataset def _get_ondisk_data ( self , dataset : callable = None , indices : np . ndarray = None ) -> torch . Tensor : return dataset ( indices = indices ) # Preparing the batches (converting format and moving to the correct device) # in a single batch optimization loop def _make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : item . to ( device ) for key , item in input_data . items ()} else : input_data_dict = { self . input_data_name : input_data . to ( device )} return input_data_dict # Preparing the batches (converting format and moving to the correct device) def _batchwise_make_input_data ( self , input_data : Union [ dict , torch . Tensor ], device = \"cpu\" , batch_indices : torch . Tensor = None , ) -> dict : if type ( input_data ) is dict : input_data_dict = { key : self . get_data ( dataset = item , indices = batch_indices ) . to ( device ) for key , item in input_data . items () } else : input_data_dict = { self . input_data_name : self . get_data ( dataset = input_data , indices = batch_indices ) . to ( device ) } return input_data_dict # Getting up optimizer from the supported engines def _get_optimizer ( self , optimizer : str = None ) -> torch . nn . Module : try : for optim_module in self . optim_modules : mod_items = dir ( optim_module ) mod_items_lower = [ item . 
lower () for item in mod_items ] if optimizer in mod_items_lower : print ( f \"Optimizer { optimizer } found in { optim_module . __name__ } .\" ) optimizer_name = mod_items [ mod_items_lower . index ( optimizer )] return getattr ( optim_module , optimizer_name ) else : print ( f \"Optimizer { optimizer } not found in { optim_module . __name__ } .\" ) except : raise Exception ( f \"There is no correspondent to { optimizer } in any known optimization module.\" ) # Getting up loss function from the correspondent module def _get_loss ( self , loss : str = None ) -> callable : if type ( loss ) == str : name = loss . upper () return getattr ( self . losses_module , name + \"Loss\" ) elif callable ( loss ): return loss else : return f \"loss must be str or callable, but received { type ( loss ) } \" # Single batch optimization loop def _optimization_loop ( self , n_epochs : int = None , loss_function : callable = None , op : NetworkTemplate = None , loss_states : dict = None , validation_loss_function : callable = None , ) -> None : for epoch in range ( n_epochs ): self . optimizer_instance . zero_grad () self . optimizer_instance . step ( loss_function ) self . checkpoint_handler ( model = op , epoch = epoch , ** self . checkpoint_params ) self . summary_writer ( loss_states = loss_states , epoch = epoch ) self . lr_decay_handler ( epoch = epoch ) self . loss_states = loss_states # Basic version of the mini-batch optimization loop # TODO It could be parallelized def _batchwise_optimization_loop ( self , n_epochs : int = None , batch_size : int = None , loss : Union [ str , type ] = None , op : NetworkTemplate = None , input_data : torch . Tensor = None , target_data : torch . Tensor = None , validation_data : Tuple [ torch . Tensor ] = None , params : dict = None , device : str = \"cpu\" , ) -> None : print ( \"Executing batchwise optimization loop.\" ) if isinstance ( loss , str ): loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) else : assert isinstance ( loss , type ), \"The object provided is not a LossBasics object.\" loss_class = loss try : loss_instance = loss_class ( operator = op ) except : raise Exception ( f \"It was not possible to instantiate the class { loss } .\" ) if validation_data is not None : validation_input_data , validation_target_data = validation_data validation_input_data = self . _make_input_data ( validation_input_data , device = device ) validation_target_data = validation_target_data . to ( device ) val_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : val_loss_function = None batches = np . array_split ( np . arange ( self . n_samples ), int ( self . n_samples / batch_size ) ) # Number of batchwise optimization epochs n_batch_epochs = len ( batches ) epoch = 0 # Outer loop iteration b_epoch = 0 # Total iteration stop_criterion = False # When using mini-batches, it is necessary to # determine the number of iterations for the outer optimization # loop if n_batch_epochs > n_epochs : n_epochs_global = 1 else : n_epochs_global = int ( math . ceil ( n_epochs / n_batch_epochs )) while epoch < n_epochs_global and stop_criterion == False : # For each batch-wise realization it is possible to determine a # new permutation for the samples samples_permutation = self . sampler ( size = self . n_samples ) for ibatch in batches : self . optimizer_instance . zero_grad () indices = samples_permutation [ ibatch ] input_batch = self . 
_batchwise_make_input_data ( input_data , device = device , batch_indices = indices ) target_batch = self . get_data ( dataset = target_data , indices = indices ) if target_batch is not None : target_batch = target_batch . to ( device ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_batch , target_data = target_batch , call_back = self . accuracy_str , ** params , ) self . optimizer_instance . step ( loss_function ) self . summary_writer ( loss_states = loss_instance . loss_states , epoch = b_epoch ) self . checkpoint_handler ( model = op , epoch = b_epoch , ** self . checkpoint_params ) self . lr_decay_handler ( epoch = b_epoch ) stop_criterion = self . stop_handler ( val_loss_function = val_loss_function ) b_epoch += 1 epoch += 1 if hasattr ( loss_instance , \"loss_states\" ): if all ( [ isinstance ( item , list ) for item in loss_instance . loss_states . values ()] ): self . loss_states = { key : np . hstack ( value ) for key , value in loss_instance . loss_states . items () } else : self . loss_states = loss_instance . loss_states # Main fit method @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . 
_get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . 
size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . loss_states , validation_loss_function = validation_loss_function , )","title":"Optimizer"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.Optimizer.__init__","text":"Parameters: Name Type Description Default optimizer str A name for a PyTorch optimizer. None early_stopping bool Early-stopping will be used or not. False summary_writer bool Write a Tensorboard run file or not. False shuffle bool Shuffle the dataset or not. True lr_decay_scheduler_params dict The parameters used for defining a learning rate decay scheme. None params dict Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). None early_stopping_params dict Parameters required by the early-stopping scheme. None checkpoint_params dict Parameters for configuring the checkpointing scheme. None Source code in simulai/optimization/_optimization.py 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 def __init__ ( self , optimizer : str = None , early_stopping : bool = False , summary_writer : bool = False , shuffle : bool = True , lr_decay_scheduler_params : dict = None , params : dict = None , early_stopping_params : dict = None , checkpoint_params : dict = None , ) -> None : \"\"\" Args: optimizer (str): A name for a PyTorch optimizer. early_stopping (bool): Early-stopping will be used or not. summary_writer (bool): Write a Tensorboard run file or not. 
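For reference, a minimal usage sketch of the `Optimizer` constructor and `fit` call documented here. The model `net`, the arrays `X`/`Y` and the learning rate are placeholders, not values from this page; the string `"rmse"` is resolved by `_get_loss` to an `"RMSE" + "Loss"` class inside `simulai.optimization`.

```python
from simulai.optimization import Optimizer

# Assumed: `net` is any NetworkTemplate-based model (e.g., a DenseNetwork) and
# X, Y are NumPy arrays of inputs/targets; all three are placeholders here.
optimizer = Optimizer(
    "adam",                       # matched case-insensitively against torch.optim and the built-in module
    params={"lr": 1e-3},          # forwarded to the underlying torch optimizer (assumed value)
    shuffle=True,
    early_stopping=False,
    # checkpoint_params, if provided, must include a "checkpoint_frequency" entry,
    # otherwise the constructor raises an exception.
)

optimizer.fit(
    op=net,
    input_data=X,
    target_data=Y,
    n_epochs=1_000,
    loss="rmse",      # looked up in simulai.optimization as RMSELoss
    params={},        # extra, task-specific arguments passed to the loss
    batch_size=64,    # enables the batch-wise optimization loop
    device="cpu",
)
```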
shuffle (bool): Shuffle the dataset or not. lr_decay_scheduler_params (dict): The parameters used for defining a learning rate decay scheme. params (dict): Extra parameters which provide information for task-specific problems (as Physics-Informed neural networks). early_stopping_params (dict): Parameters required by the early-stopping scheme. checkpoint_params (dict): Parameters for configuring the checkpointing scheme. \"\"\" if \"n_samples\" in list ( params . keys ()): self . n_samples = params . pop ( \"n_samples\" ) else : self . n_samples = None self . optimizer = optimizer self . params = params self . early_stopping = early_stopping self . early_stopping_params = early_stopping_params self . checkpoint_params = checkpoint_params self . summary_writer = summary_writer self . shuffle = shuffle self . lr_decay_scheduler_params = lr_decay_scheduler_params self . lr_decay_scheduler = None self . optim_module_names = [ \"torch.optim\" , \"simulai.optimization._builtin_pytorch\" , ] self . input_data_name = \"input_data\" self . optim_modules = [ importlib . import_module ( module ) for module in self . optim_module_names ] self . optim_class = self . _get_optimizer ( optimizer = optimizer ) self . get_data = self . _get_vector_data self . losses_module = importlib . import_module ( \"simulai.optimization\" ) # Using early_stopping or not if self . early_stopping is True : self . stop_handler = self . _early_stopping_handler else : self . stop_handler = self . _bypass_stop_handler # Using summary writing (necessary for tensorboard), or not if self . summary_writer is True : try : from torch.utils.tensorboard import SummaryWriter except : raise Exception ( \"It is necessary to have tensorboard installed to use summary writing.\" ) self . writer = SummaryWriter () self . summary_writer = self . _summary_writer else : self . summary_writer = self . _bypass_summary_writer # Determining the kind of sampling will be executed if self . shuffle : self . sampler = self . _exec_shuffling else : self . sampler = self . _no_shuffling # Using lr decay or not if self . lr_decay_scheduler_params is not None : self . lr_decay_handler = self . _lr_decay_handler else : self . lr_decay_handler = self . _bypass_lr_decay_handler # Using checkpoint or not if self . checkpoint_params is not None : if \"checkpoint_frequency\" in self . checkpoint_params . keys (): self . checkpoint_frequency = self . checkpoint_params . pop ( \"checkpoint_frequency\" ) else : raise Exception ( \"Checkpoint frequency not defined. Please give a value for it.\" ) self . checkpoint_handler = self . _checkpoint_handler else : self . checkpoint_params = dict () self . checkpoint_handler = self . _bypass_checkpoint_handler # When checkpoints are used, it is possible to overwrite them or # creating multiple checkpoints in different states overwrite_savepoint = lambda epoch : \"\" not_overwrite_savepoint = lambda epoch : f \"_ckp_epoch_ { epoch } \" # Rules for overwritting or not checkpoints if \"overwrite\" in self . checkpoint_params . keys (): overwrite = self . checkpoint_params . pop ( \"overwrite\" ) if overwrite == True : self . overwrite_rule = overwrite_savepoint else : self . overwrite_rule = not_overwrite_savepoint else : self . overwrite_rule = overwrite_savepoint self . validation_score = np . inf self . awaited_steps = 0 self . accuracy_str = \"\" self . decay_frequency = None self . loss_states = None self . 
is_physics_informed = False","title":"__init__()"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.Optimizer.fit","text":"Parameters: Name Type Description Default op NetworkTemplate The model which will be trained None input_data Union [ dict , Tensor , ndarray , callable ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray , callable ] The target data for the problem. None validation_data Tuple [ Union [ Tensor , ndarray , callable ]] The validation data used for the problem (if required). None n_epochs int Number of epochs for the optimization process. None loss str A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray 'rmse' params dict Extra parameters required for task-specific problems (as Physics-informed neural networks). None batch_size int The size of the batch used in each optimization epoch None device str The device in which the optimization will run, 'cpu' or 'gpu'. 'cpu' distributed bool Use distributed (multi-node) training or not. False use_jit bool Use PyTorch JIT (Just in time compilation) or not. False Source code in simulai/optimization/_optimization.py 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 @_convert_tensor_format def fit ( self , op : NetworkTemplate = None , input_data : Union [ dict , torch . Tensor , np . ndarray , callable ] = None , target_data : Union [ torch . Tensor , np . ndarray , callable ] = None , validation_data : Tuple [ Union [ torch . Tensor , np . ndarray , callable ]] = None , n_epochs : int = None , loss : str = \"rmse\" , params : dict = None , batch_size : int = None , device : str = \"cpu\" , distributed : bool = False , use_jit : bool = False , ) -> None : \"\"\" Args: op (NetworkTemplate): The model which will be trained input_data (Union[dict, torch.Tensor, np.ndarray, callable]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray, callable]): The target data for the problem. validation_data (Tuple[Union[torch.Tensor, np.ndarray, callable]]): The validation data used for the problem (if required). n_epochs (int): Number of epochs for the optimization process. loss (str): A string for referring some loss function defined on simulai/optimization/_losses.py.ndarray params (dict): Extra parameters required for task-specific problems (as Physics-informed neural networks). batch_size (int): The size of the batch used in each optimization epoch device (str): The device in which the optimization will run, 'cpu' or 'gpu'. 
distributed (bool): Use distributed (multi-node) training or not. use_jit (bool): Use PyTorch JIT (Just in time compilation) or not. \"\"\" # Verifying if the params dictionary contains Physics-informed # attributes extra_parameters = None if \"residual\" in params : self . is_physics_informed = True extra_parameters = self . _seek_by_extra_trainable_parameters ( residual = params [ \"residual\" ] ) if use_jit : try : params [ \"residual\" ] = torch . compile ( params [ \"residual\" ]) except AttributeError : pass else : pass _adjust_loss_function_to_model ( model = op , loss = loss , physics_informed = self . is_physics_informed ) # When using inputs with the format h5py.Dataset if callable ( input_data ) and callable ( target_data ): assert batch_size , ( \"When the input and target datasets are in disk, it is necessary to provide a \" \" value for batch_size.\" ) self . get_data = self . _get_ondisk_data else : pass # When target is None, it is expected a residual (Physics-Informed) training if target_data is None : assert \"residual\" in params , ( \"If target_data are not provided, residual must be != None \" \"in order to generate it.\" ) assert callable ( params [ \"residual\" ]), ( f \"operator must be callable,\" f \" but received { type ( params [ 'operator' ]) } .\" ) else : pass if \"causality_preserving\" in params . keys (): assert self . shuffle == False , ( \"If the causality preserving algorithm is being used,\" \" no shuffling must be allowed when creating the mini-batches.\" ) # When early-stopping is used, it is necessary to provide a validation dataset if self . early_stopping is True : assert validation_data is not None , ( \"If early-stopping is being used, it is necessary to provide a\" \"validation dataset via validation_data.\" ) else : pass # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) elif not device : device = \"cpu\" print ( \"Received None, but using cpu instead.\" ) else : raise Exception ( f \"The device must be cpu or gpu, the device { device } is not supported.\" ) if not \"device\" in params : params [ \"device\" ] = device # In a multi-device execution, the optimizer must be properly instantiated to execute distributed tasks. if distributed == True : from torch.distributed.optim import DistributedOptimizer from torch.distributed.rpc import RRef optimizer_params = list () for param in op . parameters (): optimizer_params . append ( RRef ( param )) if extra_parameters is not None : optimizer_params += extra_parameters self . optimizer_instance = DistributedOptimizer ( self . optim_class , optimizer_params , ** self . params ) else : # Guaranteeing the correct operator placement when using a single device op = op . to ( device ) # Trying to use the PyTorch JIT compilation if use_jit : try : op = torch . compile ( op ) except AttributeError : pass if extra_parameters is not None : optimizer_params = list ( op . parameters ()) + extra_parameters self . optimizer_instance = self . optim_class ( optimizer_params , ** self . params ) else : self . optimizer_instance = self . optim_class ( op . parameters (), ** self . 
params ) # Configuring LR decay, when necessary lr_scheduler_class = self . _get_lr_decay () if lr_scheduler_class is not None : print ( f \"Using LR decay { lr_scheduler_class } .\" ) self . lr_decay_scheduler = lr_scheduler_class ( self . optimizer_instance , ** self . lr_decay_scheduler_params ) else : pass # If GPU is being used, try to completely allocate the dataset there. if device_label == \"gpu\" : input_data = self . _try_to_transfer_to_GPU ( data = input_data , device = device ) target_data = self . _try_to_transfer_to_GPU ( data = target_data , device = device ) else : pass # Determining the kind of execution to be performed, batch-wise or not if batch_size is not None : # Determining the number of samples for each case # dictionary if type ( input_data ) is dict : key = list ( input_data . keys ())[ 0 ] self . n_samples = input_data [ key ] . size ()[ 0 ] # When using h5py.Group, the number of samples must be informed in the instantiation elif callable ( input_data ): assert self . n_samples is not None , ( \"If the dataset is on disk, it is necessary\" \"to inform n_samples using the dictionary params.\" ) # other cases: torch.Tensor, np.ndarray else : self . n_samples = input_data . size ()[ 0 ] self . _batchwise_optimization_loop ( n_epochs = n_epochs , batch_size = batch_size , loss = loss , op = op , input_data = input_data , target_data = target_data , validation_data = validation_data , params = params , device = device , ) else : # In this case, the entire datasets are placed in the same device, CPU or GPU # The datasets are initially located on CPU input_data = self . _make_input_data ( input_data , device = device ) # Target data is optional for some cases if target_data is not None : target_data = target_data . to ( device ) loss_class = self . _get_loss ( loss = loss ) loss_instance = loss_class ( operator = op ) # Instantiating the loss function loss_function = loss_instance ( input_data = input_data , target_data = target_data , ** params ) # Instantiating the validation loss function, if necessary if self . early_stopping is True : validation_input_data , validation_target_data = validation_data validation_loss_function = loss_instance ( input_data = validation_input_data , target_data = validation_target_data , ** params , ) else : validation_loss_function = None # Executing the optimization loop self . _optimization_loop ( n_epochs = n_epochs , loss_function = loss_function , op = op , loss_states = loss_instance . 
loss_states , validation_loss_function = validation_loss_function , )","title":"fit()"},{"location":"simulai_optimization/simulai_optimizer/#scipyinterface","text":"Source code in simulai/optimization/_optimization.py 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 class ScipyInterface : def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . 
tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64 def _stack_and_convert_parameters ( self , parameters : List [ Union [ torch . Tensor , np . ndarray ]] ) -> np . ndarray : \"\"\" It produces a stack of all the model parameters. Args: parameters (List[Union[torch.Tensor, np.ndarray]]): A list containing all the model parameters in their original shapes. Returns: np.ndarray: A stack (single vertical array) of all the model parameters. \"\"\" return np . hstack ( [ param . detach () . numpy () . astype ( np . float64 ) . flatten () for param in parameters . values () ] ) def _update_and_set_parameters ( self , parameters : np . ndarray ) -> None : \"\"\" It updates the parameters with the new values estimated by the optimizer. Args: parameters (np.ndarray): The stack of all the model parameters. \"\"\" operators = [ torch . from_numpy ( parameters [ slice ( * interval )] . reshape ( shape ) . astype ( self . default_dtype ) ) . to ( self . device ) for interval , shape in zip ( self . operators_intervals , self . operators_shapes . values () ) ] for opi , parameter in enumerate ( self . fun . parameters ()): parameter . data . copy_ ( operators [ opi ]) def _exec_kwargs_forward ( self , input_data : dict = None ): \"\"\"It executes the forward pass for the model when it receives more than one input. Args: input_data dict: Data to be passed to the model. \"\"\" return self . fun . forward ( ** input_data ) def _exec_forward ( self , input_data : Union [ np . ndarray , torch . Tensor ] = None ): \"\"\"It executes the forward pass for the model. Args: input_data (Union[np.ndarray, torch.Tensor]): Data to be passed to the model. \"\"\" return self . fun . forward ( input_data = input_data ) def _fun_num ( self , parameters : np . ndarray ) -> Tuple [ float ]: \"\"\" Args: parameters (np.ndarray): The stacked parameters defined for the model. Returns: Tuple[float]: The loss(es) defined for the optimization process. \"\"\" self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () return loss . detach () . cpu () . numpy () . astype ( np . float64 ) def _fun ( self , parameters : np . ndarray ) -> Tuple [ float , np . ndarray ]: \"\"\" Args: parameters (np.ndarray): The stack of all the trainable parameters for the model. Returns: Tuple[float, np.ndarray]: A tuple containing the value for the loss function and the array of gradients for the model parameters. \"\"\" # Setting the new values for the model parameters self . _update_and_set_parameters ( parameters ) closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) loss = closure () grads = [ v . grad . detach () . cpu () . numpy () for v in self . fun . parameters ()] gradients = np . hstack ( [ v . flatten () for v , shape in zip ( grads , list ( self . operators_shapes . values ())) ] ) return loss . detach () . cpu () . numpy () . astype ( np . float64 ), gradients . astype ( np . float64 ) def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . 
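`ScipyInterface` flattens every model parameter into a single `float64` vector and restores it by slicing that vector with cumulative offsets. A self-contained sketch of that flatten/restore bookkeeping, using an arbitrary two-layer stand-in model:

```python
from collections import OrderedDict

import numpy as np
import torch

# Arbitrary stand-in model; only the shape bookkeeping matters here.
model = torch.nn.Sequential(torch.nn.Linear(3, 5), torch.nn.Linear(5, 2))

shapes = OrderedDict({k: list(v.shape) for k, v in model.state_dict().items()})
intervals = np.cumsum([0] + [int(np.prod(s)) for s in shapes.values()])
slices = [intervals[i:i + 2].tolist() for i in range(len(intervals) - 1)]

# Flatten: one float64 vector containing every parameter, in state_dict order.
flat = np.hstack([p.detach().numpy().astype(np.float64).flatten()
                  for p in model.state_dict().values()])

# Restore: slice the vector and reshape each chunk back to its tensor shape.
restored = [torch.from_numpy(flat[slice(*interval)].reshape(shape))
            for interval, shape in zip(slices, shapes.values())]
for param, new in zip(model.parameters(), restored):
    param.data.copy_(new.to(param.dtype))
```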
ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . x )","title":"ScipyInterface"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.ScipyInterface.__init__","text":"An interface for using SciPy-defined optimization algorithms. Parameters: Name Type Description Default fun NetworkTemplate A model (neural network) to be trained. None optimizer str A name for an optimizar available on SciPy. None optimizer_config dict A configuration dictionary for the chosen optimizer. dict () loss callable A loss function implemented in the form of a Python function or class. None loss_config dict A configuration dictionary for the loss function. None device str The device in which the optimization will be executed ('cpu' or 'gpu'). 'cpu' jac str If necessary, define a method for evaluating the Jacobian available on SciPy. None Raises: Type Description Exception If a not recognized device is defined as 'device'. Source code in simulai/optimization/_optimization.py 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 def __init__ ( self , fun : NetworkTemplate = None , optimizer : str = None , optimizer_config : dict = dict (), loss : callable = None , loss_config : dict = None , device : str = \"cpu\" , jac : str = None , ) -> None : \"\"\"An interface for using SciPy-defined optimization algorithms. Args: fun (NetworkTemplate): A model (neural network) to be trained. optimizer (str): A name for an optimizar available on SciPy. optimizer_config (dict): A configuration dictionary for the chosen optimizer. loss (callable): A loss function implemented in the form of a Python function or class. loss_config (dict): A configuration dictionary for the loss function. device (str): The device in which the optimization will be executed ('cpu' or 'gpu'). jac (str): If necessary, define a method for evaluating the Jacobian available on SciPy. Raises: Exception: If a not recognized device is defined as 'device'. \"\"\" # Configuring the device to be used during the fitting process device_label = device if device == \"gpu\" : if not torch . cuda . is_available (): print ( \"Warning: There is no GPU available, using CPU instead.\" ) device = \"cpu\" device_label = \"cpu\" else : try : device = \"cuda:\" + os . 
environ [ \"LOCAL_RANK\" ] except KeyError : device = \"cuda\" device_label = \"gpu\" print ( \"Using GPU.\" ) elif device == \"cpu\" : print ( \"Using CPU.\" ) else : raise Exception ( f \"The device must be cpu or gpu, but received: { device } \" ) self . device = device self . engine = \"scipy.optimize\" self . engine_module = importlib . import_module ( self . engine ) self . minimization_method = \"minimize\" self . optimizer = getattr ( self . engine_module , self . minimization_method ) self . optimizer_config = optimizer_config or dict () self . optimizer_config [ \"method\" ] = optimizer self . fun = fun self . loss = loss self . loss_config = loss_config or dict () self . operators_names = list ( self . fun . state_dict () . keys ()) self . operators_shapes = OrderedDict ( { k : list ( v . shape ) for k , v in self . fun . state_dict () . items ()} ) self . state_0 = self . fun . state_dict () intervals = np . cumsum ( [ 0 ] + [ np . prod ( shape ) for shape in self . operators_shapes . values ()] ) self . operators_intervals = [ intervals [ i : i + 2 ] . tolist () for i in range ( len ( intervals ) - 1 ) ] if jac : self . optimizer_config [ \"jac\" ] = jac self . objective = self . _fun_num else : self . optimizer_config [ \"jac\" ] = True self . objective = self . _fun # Determining default type if torch . get_default_dtype () == torch . float32 : self . default_dtype = np . float32 else : self . default_dtype = np . float64","title":"__init__()"},{"location":"simulai_optimization/simulai_optimizer/#simulai.optimization.ScipyInterface.fit","text":"Parameters: Name Type Description Default input_data Union [ dict , Tensor , ndarray ] The (or collection of) dataset(s) used as input for the model. None target_data Union [ Tensor , ndarray ] The target data used for training the model. None Source code in simulai/optimization/_optimization.py 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 def fit ( self , input_data : Union [ dict , torch . Tensor , np . ndarray ] = None , target_data : Union [ torch . Tensor , np . ndarray ] = None , ) -> None : \"\"\" Args: input_data (Union[dict, torch.Tensor, np.ndarray]): The (or collection of) dataset(s) used as input for the model. target_data (Union[torch.Tensor, np.ndarray]): The target data used for training the model. \"\"\" parameters_0 = self . _stack_and_convert_parameters ( self . state_0 ) print ( f \" \\n Starting ScipyInterface with method: { self . optimizer_config [ 'method' ] } \\n \" ) if isinstance ( input_data , dict ): self . exec_forward = self . _exec_kwargs_forward else : self . exec_forward = self . _exec_forward self . input_data = input_data self . target_data = target_data self . closure = self . loss ( self . input_data , self . target_data , ** self . loss_config ) solution = self . optimizer ( self . objective , parameters_0 , ** self . optimizer_config ) self . _update_and_set_parameters ( solution . 
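A minimal usage sketch for `ScipyInterface`. It assumes `net` is a `NetworkTemplate` model and `loss_class` is a simulai loss whose instance, called with the data, returns a closure; the data arrays and the optimizer options are illustrative. Any method name accepted by `scipy.optimize.minimize` can be passed as `optimizer`.

```python
from simulai.optimization import ScipyInterface

# Assumed placeholders: net (model), loss_class (simulai loss), X, Y (datasets).
interface = ScipyInterface(
    fun=net,
    optimizer="L-BFGS-B",                          # a scipy.optimize.minimize method
    optimizer_config={"options": {"maxiter": 500}},
    loss=loss_class(operator=net),
    loss_config={},
    device="cpu",
)

# The interface builds the objective (and gradients, since jac was not given)
# from the loss closure and hands everything to scipy.optimize.minimize.
interface.fit(input_data=X, target_data=Y)
```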
x )","title":"fit()"},{"location":"simulai_regression/simulai_dense/","text":"red { color: red } simulai.regression # Dense # Linear # Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 class Linear ( NetworkTemplate ): name = \"linear\" engine = \"torch\" def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data ) def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . name ) __init__ ( input_size = None , output_size = None , bias = True , name = None ) # Linear operator F(u) = Au + b Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name forward ( input_data = None ) # Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using Linear. 
(Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 65 66 67 68 69 70 71 72 73 74 75 76 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data ) to_numpy () # It converts the tensors in Linear to numpy.ndarray. Source code in simulai/regression/_pytorch/_dense.py 78 79 80 81 def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . name ) SLFNN # Bases: Linear Source code in simulai/regression/_pytorch/_dense.py 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 class SLFNN ( Linear ): def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . _get_operation ( operation = activation ) def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. (Default value = None) \"\"\" return self . activation ( super () . forward ( input_data = input_data )) __init__ ( input_size = None , output_size = None , bias = True , name = None , activation = 'tanh' ) # Single layer fully-connected (dense) neural network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . 
_get_operation ( operation = activation ) forward ( input_data = None ) # Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using SLFNN. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 110 111 112 113 114 115 116 117 118 119 120 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. (Default value = None) \"\"\" return self . activation ( super () . forward ( input_data = input_data )) ShallowNetwork # Bases: SLFNN Source code in simulai/regression/_pytorch/_dense.py 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 class ShallowNetwork ( SLFNN ): def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. (Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . __init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . forward ( input_data = input_data )) return self . output_layer . forward ( input_data = hidden_state ) __init__ ( input_size = None , hidden_size = None , output_size = None , bias = True , name = None , activation = 'tanh' ) # ELM-like (Extreme Learning Machine) shallow network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None hidden_size int Dimension of the hidden (intermediary) state. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias or not for the last layer. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. 
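The three wrappers documented here (`Linear`, `SLFNN` and `ShallowNetwork`) share the same constructor pattern. A minimal sketch, assuming the classes are exposed under `simulai.regression` as the page title suggests; layer sizes and names are arbitrary:

```python
import numpy as np

from simulai.regression import SLFNN, Linear, ShallowNetwork

x = np.random.rand(16, 10).astype("float32")  # 16 samples, 10 features (illustrative)

linear = Linear(input_size=10, output_size=4, bias=True, name="proj")
single = SLFNN(input_size=10, output_size=4, activation="tanh", name="slfnn")
shallow = ShallowNetwork(
    input_size=10, hidden_size=32, output_size=4, activation="tanh", name="elm"
)

print(linear.forward(input_data=x).shape)   # torch.Size([16, 4])
print(single.forward(input_data=x).shape)   # torch.Size([16, 4])
print(shallow.forward(input_data=x).shape)  # torch.Size([16, 4])
```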
(Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . __init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size forward ( input_data = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 155 156 157 158 159 160 161 162 163 164 165 166 167 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . forward ( input_data = input_data )) return self . output_layer . forward ( input_data = hidden_state ) DenseNetwork # Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 class DenseNetwork ( NetworkTemplate ): name = \"dense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . 
__init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . _setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )] def _calculate_gain ( self , activation : str = \"Tanh\" ) -> float : \"\"\"It evaluates a multiplier coefficient, named as `gain`, which is used to enhance the funcionality of each kind of activation function. Args: activation (str, optional): (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name . lower () in self . gain_supported_activations : return torch . nn . init . calculate_gain ( name . lower ()) else : return 1 @staticmethod def _determine_initialization ( activation : str = \"Tanh\" ) -> str : \"\"\"It determines the most proper initialization method for each activation function. Args: activation (str, optional): Activation function. (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name in [ \"ReLU\" ]: return \"kaiming\" elif name == \"Siren\" : return \"siren\" else : return \"xavier\" def _setup_layer ( self , input_size : int = 0 , output_size : int = 0 , initialization : str = None , bias : bool = True , first_layer : bool = False , ) -> torch . nn . Linear : \"\"\" Args: input_size (int, optional): Dimension of the input. (Default value = 0) output_size (int, optional): Dimension of the output. (Default value = 0) initialization (str, optional): Initialization method. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) first_layer (bool, optional): Is this layer the first layer or not. (Default value = False) \"\"\" # It instantiates a linear operation # f: y^l = f(x^(l-1)) = (W^l).dot(x^(l-1)) + b^l layer = torch . nn . Linear ( input_size , output_size , bias = bias ) if initialization == \"xavier\" : torch . nn . init . xavier_normal_ ( layer . 
weight , gain = self . _calculate_gain ( self . activations_str [ 0 ]) ) return layer # The Siren initialization requires some special consideration elif initialization == \"siren\" : assert ( self . c is not None ), \"When using siren, the parameter c must be defined.\" assert ( self . omega_0 is not None ), \"When using siren, the parameter omega_0 must be defined.\" if first_layer == True : m = 1 / input_size else : m = np . sqrt ( self . c / input_size ) / self . omega_0 torch . nn . init . trunc_normal_ ( layer . weight , a =- m , b = m ) b = np . sqrt ( 1 / input_size ) torch . nn . init . trunc_normal_ ( layer . bias , a =- b , b = b ) return layer elif initialization == \"kaiming\" : return layer # Kaiming is the default initialization in PyTorch else : print ( \"Initialization method still not implemented. \\ Using Kaiming instead\" ) return layer # The forward step of the network @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. (Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor __init__ ( layers_units = None , activations = None , input_size = None , output_size = None , normalization = 'bypass' , name = '' , last_bias = True , last_activation = 'identity' , ** kwargs ) # Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. 
(Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . __init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . _setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )] forward ( input_data = None ) # It executes the forward step for the DenseNetwork. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input tensor to be processed by DenseNetwork. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. (Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . 
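A minimal `DenseNetwork` sketch following the constructor documented above. The layer widths, input/output sizes and activation are illustrative; as the docstring states, a single string such as `"tanh"` is applied to all layers.

```python
import numpy as np

from simulai.regression import DenseNetwork

# Hypothetical configuration: four hidden layers of 50 neurons with tanh activations.
net = DenseNetwork(
    layers_units=[50, 50, 50, 50],
    activations="tanh",
    input_size=2,
    output_size=1,
    name="net",
)

x = np.random.rand(100, 2).astype("float32")
print(net.forward(input_data=x).shape)  # torch.Size([100, 1])
```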
activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor ResDenseNetwork # Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 class ResDenseNetwork ( DenseNetwork ): name = \"residualdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :]) # Merging the layers into a reasonable sequence def _merge ( self , layer : list = None , act : list = None ) -> list : \"\"\"It merges the dense layers and the activations into a single block. Args: layer (list, optional): List of dense layers. (Default value = None) act (list, optional): List of activation functions. 
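For reference, a minimal usage sketch of the DenseNetwork documented above; the layer widths, activation choice and input shape are illustrative, not library defaults:

import numpy as np
from simulai.regression import DenseNetwork

# Three hidden layers of 50 neurons, tanh everywhere, identity on the last layer (the default).
net = DenseNetwork(
    layers_units=[50, 50, 50],
    activations="tanh",
    input_size=2,
    output_size=1,
    name="net",
)

# forward() accepts numpy arrays thanks to the @as_tensor decorator and returns a torch.Tensor.
output = net.forward(input_data=np.random.rand(100, 2))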
(Default value = None) \"\"\" merged_list = list () for i , j in zip ( layer , act ): merged_list . append ( i ) merged_list . append ( j ) return merged_list def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor __init__ ( layers_units = None , activations = None , input_size = None , output_size = None , normalization = 'bypass' , name = '' , last_bias = True , last_activation = 'identity' , residual_size = 1 , ** kwargs ) # Residual Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' residual_size int Size of the residual block. (Default value = 1) 1 Source code in simulai/regression/_pytorch/_dense.py 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . 
__init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :]) forward ( input_data = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor summary () # It prints a summary of the network. Source code in simulai/regression/_pytorch/_dense.py 476 477 478 479 480 481 482 483 484 485 def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block ) ConvexDenseNetwork # Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 class ConvexDenseNetwork ( DenseNetwork ): name = \"convexdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. 
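A minimal usage sketch of ResDenseNetwork under illustrative sizes: after removing the input and output blocks, the interleaved (layer, activation) pairs must split evenly into blocks of 2 * residual_size, which holds for five hidden layers with residual_size=2:

import numpy as np
from simulai.regression import ResDenseNetwork

res_net = ResDenseNetwork(
    layers_units=[50, 50, 50, 50, 50],  # five hidden layers of equal width (illustrative)
    activations="tanh",
    input_size=2,
    output_size=1,
    residual_size=2,
)

# Each hidden block applies 0.5 * (x + block(x)), as in the forward() shown above.
output = res_net.forward(input_data=np.random.rand(100, 2))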
(Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . _check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) def _check_regular_net ( self , layers_units : list ) -> bool : \"\"\"It checks if all the layers has the same number of neurons. Args: layers_units (list): \"\"\" mean = int ( sum ( layers_units ) / len ( layers_units )) self . hidden_size = mean if len ([ True for j in layers_units if j == mean ]) == len ( layers_units ): return True else : return False @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. (Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). (Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). (Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor __init__ ( layers_units = None , activations = None , input_size = None , output_size = None , normalization = 'bypass' , name = '' , last_bias = True , last_activation = 'identity' , ** kwargs ) # Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. 
(Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . _check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) forward ( input_data = None , u = None , v = None ) # Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Input data to be processed using ConvexDenseNetwork. (Default value = None) None u Union [ Tensor , ndarray ] Input generated by the first auxiliar encoder (external model). (Default value = None) None v Union [ Tensor , ndarray ] Input generated by the second auxiliar encoder (external model). (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. (Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). (Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). 
(Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor","title":"Simulai dense"},{"location":"simulai_regression/simulai_dense/#simulairegression","text":"","title":"simulai.regression"},{"location":"simulai_regression/simulai_dense/#dense","text":"","title":"Dense"},{"location":"simulai_regression/simulai_dense/#linear","text":"Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 27 28 29 30 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 class Linear ( NetworkTemplate ): name = \"linear\" engine = \"torch\" def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data ) def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . name )","title":"Linear"},{"location":"simulai_regression/simulai_dense/#simulai.regression.Linear.__init__","text":"Linear operator F(u) = Au + b Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 31 32 33 34 35 36 37 38 39 40 41 42 43 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , ) -> None : \"\"\"Linear operator F(u) = Au + b Args: input_size (int, optional): Dimension of the input. 
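A usage sketch of ConvexDenseNetwork, assuming two auxiliary DenseNetwork encoders whose output width matches the (uniform) hidden width; all names and sizes below are illustrative. Each hidden layer produces z and mixes the encoder outputs as (1 - z) * u + z * v:

import numpy as np
from simulai.regression import ConvexDenseNetwork, DenseNetwork

trunk = ConvexDenseNetwork(
    layers_units=[100, 100, 100],  # all hidden layers must share the same width
    activations="tanh",
    input_size=2,
    output_size=50,
)

# Two auxiliary encoders producing u and v with the hidden width (100).
encoder_u = DenseNetwork(layers_units=[100], activations="tanh", input_size=2, output_size=100)
encoder_v = DenseNetwork(layers_units=[100], activations="tanh", input_size=2, output_size=100)

data = np.random.rand(100, 2)
u = encoder_u.forward(input_data=data)
v = encoder_v.forward(input_data=data)

output = trunk.forward(input_data=data, u=u, v=v)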
(Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) \"\"\" super ( Linear , self ) . __init__ ( name = name ) self . input_size = input_size self . output_size = output_size self . activations_str = None self . layers = [ torch . nn . Linear ( input_size , output_size , bias = bias )] self . add_module ( self . name + \"_\" + \"linear_op\" , self . layers [ 0 ]) self . weights = [ item . weight for item in self . layers ] self . bias = [ item . bias for item in self . layers ] self . name = name","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.Linear.forward","text":"Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using Linear. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 65 66 67 68 69 70 71 72 73 74 75 76 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using Linear. (Default value = None) \"\"\" return self . layers [ 0 ]( input_data )","title":"forward()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.Linear.to_numpy","text":"It converts the tensors in Linear to numpy.ndarray. Source code in simulai/regression/_pytorch/_dense.py 78 79 80 81 def to_numpy ( self ): \"\"\"It converts the tensors in Linear to numpy.ndarray.\"\"\" return LinearNumpy ( layer = self . layers [ 0 ], name = self . name )","title":"to_numpy()"},{"location":"simulai_regression/simulai_dense/#slfnn","text":"Bases: Linear Source code in simulai/regression/_pytorch/_dense.py 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 class SLFNN ( Linear ): def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . _get_operation ( operation = activation ) def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. (Default value = None) \"\"\" return self . activation ( super () . forward ( input_data = input_data ))","title":"SLFNN"},{"location":"simulai_regression/simulai_dense/#simulai.regression.SLFNN.__init__","text":"Single layer fully-connected (dense) neural network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. 
(Default value = None) None bias bool Using bias tensor or not. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 def __init__ ( self , input_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"Single layer fully-connected (dense) neural network Args: input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( SLFNN , self ) . __init__ ( input_size = input_size , output_size = output_size , bias = bias , name = name ) self . activation = self . _get_operation ( operation = activation )","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.SLFNN.forward","text":"Applying the operator Linear. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Data to be processed using SLFNN. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 110 111 112 113 114 115 116 117 118 119 120 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"Applying the operator Linear. Args: input_data (Union[torch.Tensor, np.ndarray], optional): Data to be processed using SLFNN. (Default value = None) \"\"\" return self . activation ( super () . forward ( input_data = input_data ))","title":"forward()"},{"location":"simulai_regression/simulai_dense/#shallownetwork","text":"Bases: SLFNN Source code in simulai/regression/_pytorch/_dense.py 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 class ShallowNetwork ( SLFNN ): def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. (Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . __init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . 
forward ( input_data = input_data )) return self . output_layer . forward ( input_data = hidden_state )","title":"ShallowNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ShallowNetwork.__init__","text":"ELM-like (Extreme Learning Machine) shallow network Parameters: Name Type Description Default input_size int Dimension of the input. (Default value = None) None hidden_size int Dimension of the hidden (intermediary) state. (Default value = None) None output_size int Dimension of the output. (Default value = None) None bias bool Using bias or not for the last layer. (Default value = True) True name str A name for identifying the model. (Default value = None) None activation str Activation function. (Default value = \"tanh\") 'tanh' Source code in simulai/regression/_pytorch/_dense.py 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 def __init__ ( self , input_size : int = None , hidden_size : int = None , output_size : int = None , bias : bool = True , name : str = None , activation : str = \"tanh\" , ) -> None : \"\"\"ELM-like (Extreme Learning Machine) shallow network Args: input_size (int, optional): Dimension of the input. (Default value = None) hidden_size (int, optional): Dimension of the hidden (intermediary) state. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) bias (bool, optional): Using bias or not for the last layer. (Default value = True) name (str, optional): A name for identifying the model. (Default value = None) activation (str, optional): Activation function. (Default value = \"tanh\") \"\"\" super ( ShallowNetwork , self ) . __init__ ( input_size = input_size , output_size = hidden_size , bias = bias , name = name ) self . output_layer = Linear ( input_size = hidden_size , output_size = output_size , bias = False , name = \"output\" ) self . output_size = output_size","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ShallowNetwork.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 155 156 157 158 159 160 161 162 163 164 165 166 167 def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" hidden_state = self . activation ( super () . forward ( input_data = input_data )) return self . output_layer . 
forward ( input_data = hidden_state )","title":"forward()"},{"location":"simulai_regression/simulai_dense/#densenetwork","text":"Bases: NetworkTemplate Source code in simulai/regression/_pytorch/_dense.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 class DenseNetwork ( NetworkTemplate ): name = \"dense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . __init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . 
_setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )] def _calculate_gain ( self , activation : str = \"Tanh\" ) -> float : \"\"\"It evaluates a multiplier coefficient, named as `gain`, which is used to enhance the funcionality of each kind of activation function. Args: activation (str, optional): (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name . lower () in self . gain_supported_activations : return torch . nn . init . calculate_gain ( name . lower ()) else : return 1 @staticmethod def _determine_initialization ( activation : str = \"Tanh\" ) -> str : \"\"\"It determines the most proper initialization method for each activation function. Args: activation (str, optional): Activation function. (Default value = \"Tanh\") \"\"\" if type ( activation ) is not str : assert hasattr ( activation , \"name\" ), f \"Activation object { type ( activation ) } must have attribute \u00b4name\u00b4.\" name = getattr ( activation , \"name\" ) else : name = activation if name in [ \"ReLU\" ]: return \"kaiming\" elif name == \"Siren\" : return \"siren\" else : return \"xavier\" def _setup_layer ( self , input_size : int = 0 , output_size : int = 0 , initialization : str = None , bias : bool = True , first_layer : bool = False , ) -> torch . nn . Linear : \"\"\" Args: input_size (int, optional): Dimension of the input. (Default value = 0) output_size (int, optional): Dimension of the output. (Default value = 0) initialization (str, optional): Initialization method. (Default value = None) bias (bool, optional): Using bias tensor or not. (Default value = True) first_layer (bool, optional): Is this layer the first layer or not. (Default value = False) \"\"\" # It instantiates a linear operation # f: y^l = f(x^(l-1)) = (W^l).dot(x^(l-1)) + b^l layer = torch . nn . Linear ( input_size , output_size , bias = bias ) if initialization == \"xavier\" : torch . nn . init . xavier_normal_ ( layer . weight , gain = self . _calculate_gain ( self . activations_str [ 0 ]) ) return layer # The Siren initialization requires some special consideration elif initialization == \"siren\" : assert ( self . c is not None ), \"When using siren, the parameter c must be defined.\" assert ( self . omega_0 is not None ), \"When using siren, the parameter omega_0 must be defined.\" if first_layer == True : m = 1 / input_size else : m = np . sqrt ( self . c / input_size ) / self . omega_0 torch . nn . init . trunc_normal_ ( layer . weight , a =- m , b = m ) b = np . sqrt ( 1 / input_size ) torch . nn . init . trunc_normal_ ( layer . bias , a =- b , b = b ) return layer elif initialization == \"kaiming\" : return layer # Kaiming is the default initialization in PyTorch else : print ( \"Initialization method still not implemented. \\ Using Kaiming instead\" ) return layer # The forward step of the network @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. 
Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. (Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor","title":"DenseNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.DenseNetwork.__init__","text":"Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" super ( DenseNetwork , self ) . __init__ () assert layers_units , \"Please, set a list of units for each layer\" assert activations , ( \"Please, set a list of activation functions\" \"or a string for all of them.\" ) # These activations support gain evaluation for the initial state self . gain_supported_activations = [ \"sigmoid\" , \"tanh\" , \"relu\" , \"leaky_relu\" ] # Default attributes self . layers_units = layers_units self . input_size = input_size self . output_size = output_size self . normalization = normalization self . name = name self . last_bias = last_bias # For extra and not ever required parameters for k , v in kwargs . 
items (): setattr ( self , k , v ) # Getting up parameters from host self . _get_from_guest ( activation = activations ) self . weights = list () # The total number of layers includes the output layer self . n_layers = len ( self . layers_units ) + 1 self . default_last_activation = last_activation self . activations , self . activations_str = self . _setup_activations ( activation = activations ) self . initializations = [ self . _determine_initialization ( activation ) for activation in self . activations_str ] self . layers = self . _setup_hidden_layers ( last_bias = last_bias ) array_layers = self . _numpy_layers () n_layers = len ( self . layers ) self . shapes = [ item . shape for item in list ( sum ( array_layers , []))] self . stitch_idx = self . _make_stitch_idx () self . layers_map = [[ ll , ll + 1 ] for ll in range ( 0 , 2 * n_layers , 2 )]","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.DenseNetwork.forward","text":"It executes the forward step for the DenseNetwork. Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] The input tensor to be processed by DenseNetwork. (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\"It executes the forward step for the DenseNetwork. Args: input_data (Union[torch.Tensor, np.ndarray], optional): The input tensor to be processed by DenseNetwork. (Default value = None) \"\"\" input_tensor_ = input_data # TODO It can be done using the PyTorch Sequential object for layer_id in range ( len ( self . layers )): output_tensor_ = self . layers [ layer_id ]( input_tensor_ ) _output_tensor_ = self . activations [ layer_id ]( output_tensor_ ) input_tensor_ = _output_tensor_ output_tensor = input_tensor_ return output_tensor","title":"forward()"},{"location":"simulai_regression/simulai_dense/#resdensenetwork","text":"Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 class ResDenseNetwork ( DenseNetwork ): name = \"residualdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. 
(Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :]) # Merging the layers into a reasonable sequence def _merge ( self , layer : list = None , act : list = None ) -> list : \"\"\"It merges the dense layers and the activations into a single block. Args: layer (list, optional): List of dense layers. (Default value = None) act (list, optional): List of activation functions. (Default value = None) \"\"\" merged_list = list () for i , j in zip ( layer , act ): merged_list . append ( i ) merged_list . append ( j ) return merged_list def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block ) @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor","title":"ResDenseNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ResDenseNetwork.__init__","text":"Residual Dense (fully-connected) neural network written in PyTorch Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. 
(Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' residual_size int Size of the residual block. (Default value = 1) 1 Source code in simulai/regression/_pytorch/_dense.py 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , residual_size : int = 1 , ** kwargs , ) -> None : \"\"\"Residual Dense (fully-connected) neural network written in PyTorch Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). residual_size (int, optional): Size of the residual block. (Default value = 1) **kwargs \"\"\" super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) # Considering the activations layers self . residual_size = 2 * residual_size self . ratio = 0.5 # Excluding the input and output layers merged_layers = self . _merge ( layer = self . layers , act = self . activations ) assert len ( merged_layers [ 2 : - 2 ]) % self . residual_size == 0 , ( \"The number of layers must be divisible\" \" by the residual block size,\" f \" but received { len ( merged_layers ) } and { residual_size } \" ) self . n_residual_blocks = int ( len ( merged_layers [ 2 : - 2 ]) / self . residual_size ) sub_layers = [ item . tolist () for item in np . split ( np . array ( merged_layers [ 2 : - 2 ]), self . n_residual_blocks ) ] self . input_block = torch . nn . Sequential ( * merged_layers [: 2 ]) self . hidden_blocks = [ torch . nn . Sequential ( * item ) for item in sub_layers ] self . output_block = torch . nn . Sequential ( * merged_layers [ - 2 :])","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ResDenseNetwork.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None ) -> torch . 
Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): (Default value = None) \"\"\" input_tensor_ = input_data input_tensor_ = self . input_block ( input_tensor_ ) for block in self . hidden_blocks : output_tensor_ = self . ratio * ( input_tensor_ + block ( input_tensor_ )) input_tensor_ = output_tensor_ output_tensor = self . output_block ( input_tensor_ ) return output_tensor","title":"forward()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ResDenseNetwork.summary","text":"It prints a summary of the network. Source code in simulai/regression/_pytorch/_dense.py 476 477 478 479 480 481 482 483 484 485 def summary ( self ): \"\"\"It prints a summary of the network.\"\"\" super () . summary () print ( \"Residual Blocks: \\n \" ) print ( self . input_block ) print ( self . hidden_blocks ) print ( self . output_block )","title":"summary()"},{"location":"simulai_regression/simulai_dense/#convexdensenetwork","text":"Bases: DenseNetwork Source code in simulai/regression/_pytorch/_dense.py 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 class ConvexDenseNetwork ( DenseNetwork ): name = \"convexdense\" engine = \"torch\" def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. (Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . _check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , ) def _check_regular_net ( self , layers_units : list ) -> bool : \"\"\"It checks if all the layers has the same number of neurons. Args: layers_units (list): \"\"\" mean = int ( sum ( layers_units ) / len ( layers_units )) self . 
hidden_size = mean if len ([ True for j in layers_units if j == mean ]) == len ( layers_units ): return True else : return False @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. (Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). (Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). (Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor","title":"ConvexDenseNetwork"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ConvexDenseNetwork.__init__","text":"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Parameters: Name Type Description Default layers_units list List with the number of neurons for each layer. (Default value = None) None activations Union [ list , str ] List of activations for each layer or a single string None input_size int Dimension of the input. (Default value = None) None output_size int Dimension of the output. (Default value = None) None normalization str Kind of normalization used between two layers. (Default value = \"bypass\") 'bypass' name str A name for identifying the model. (Default value = \"\") '' last_bias bool Using bias in the last layer or not. (Default value = True) True last_activation str Activation for the last layer (default is 'identity'). 'identity' Source code in simulai/regression/_pytorch/_dense.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 def __init__ ( self , layers_units : list = None , activations : Union [ list , str ] = None , input_size : int = None , output_size : int = None , normalization : str = \"bypass\" , name : str = \"\" , last_bias : bool = True , last_activation : str = \"identity\" , ** kwargs , ) -> None : \"\"\"Dense network with convex combinations in the hidden layers. This architecture is useful when combined to the Improved Version of DeepONets Args: layers_units (list, optional): List with the number of neurons for each layer. (Default value = None) activations (Union[list, str], optional): List of activations for each layer or a single string informing the activation used for all of them. (Default value = None) input_size (int, optional): Dimension of the input. (Default value = None) output_size (int, optional): Dimension of the output. 
(Default value = None) normalization (str, optional): Kind of normalization used between two layers. (Default value = \"bypass\") name (str, optional): A name for identifying the model. (Default value = \"\") last_bias (bool, optional): Using bias in the last layer or not. (Default value = True) last_activation (str, optional): Activation for the last layer (default is 'identity'). **kwargs \"\"\" self . hidden_size = None assert self . _check_regular_net ( layers_units = layers_units ), ( \"All the hidden layers must be equal in\" \"a Convex Dense Network.\" ) super () . __init__ ( layers_units = layers_units , activations = activations , input_size = input_size , output_size = output_size , normalization = normalization , name = name , last_bias = last_bias , last_activation = last_activation , ** kwargs , )","title":"__init__()"},{"location":"simulai_regression/simulai_dense/#simulai.regression.ConvexDenseNetwork.forward","text":"Parameters: Name Type Description Default input_data Union [ Tensor , ndarray ] Input data to be processed using ConvexDenseNetwork. (Default value = None) None u Union [ Tensor , ndarray ] Input generated by the first auxiliar encoder (external model). (Default value = None) None v Union [ Tensor , ndarray ] Input generated by the second auxiliar encoder (external model). (Default value = None) None Source code in simulai/regression/_pytorch/_dense.py 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 @as_tensor def forward ( self , input_data : Union [ torch . Tensor , np . ndarray ] = None , u : Union [ torch . Tensor , np . ndarray ] = None , v : Union [ torch . Tensor , np . ndarray ] = None , ) -> torch . Tensor : \"\"\" Args: input_data (Union[torch.Tensor, np.ndarray], optional): Input data to be processed using ConvexDenseNetwork. (Default value = None) u (Union[torch.Tensor, np.ndarray], optional): Input generated by the first auxiliar encoder (external model). (Default value = None) v (Union[torch.Tensor, np.ndarray], optional): Input generated by the second auxiliar encoder (external model). (Default value = None) \"\"\" input_tensor_ = input_data # The first layer operation has no difference from the Vanilla one first_output = self . activations [ 0 ]( self . layers [ 0 ]( input_tensor_ )) input_tensor_ = first_output layers_hidden = self . layers [ 1 : - 1 ] activations_hidden = self . activations [ 1 : - 1 ] for layer_id in range ( len ( layers_hidden )): output_tensor_ = layers_hidden [ layer_id ]( input_tensor_ ) z = activations_hidden [ layer_id ]( output_tensor_ ) _output_tensor_ = ( 1 - z ) * u + z * v input_tensor_ = _output_tensor_ # The last layer operation too last_output = self . activations [ - 1 ]( self . 
layers [ - 1 ]( input_tensor_ )) output_tensor = last_output return output_tensor","title":"forward()"},{"location":"simulai_regression/simulai_opinf/","text":"red { color: red } OpInf # OpInf # Source code in simulai/regression/_opinf.py 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 
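The forward pass documented above gates the two auxiliary encoder outputs u and v with a convex-combination-style update driven by each hidden layer. A minimal sketch of one such update step, with assumed shapes and an assumed activation (the real network takes both from its configuration):

import torch

batch, width = 8, 16                         # assumed batch size and hidden width
hidden = torch.nn.Linear(width, width)
activation = torch.nn.Sigmoid()              # assumed activation producing gates in (0, 1)

x = torch.randn(batch, width)                # output of the previous layer
u = torch.randn(batch, width)                # first auxiliary encoder output
v = torch.randn(batch, width)                # second auxiliary encoder output

z = activation(hidden(x))                    # gate produced by the hidden layer
x_next = (1 - z) * u + z * v                 # blending step, as in ConvexDenseNetwork.forward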
887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 class OpInf : def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . _parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . tmp_data_path = \"/tmp\" # Matrix containing all the model parameters @property def O_hat ( self ) -> np . ndarray : \"\"\"The concatenation of all the coefficients matrices\"\"\" valid = [ m for m in [ self . c_hat , self . A_hat , self . H_hat , self . B_hat ] if m is not None ] return np . hstack ( valid ) @property def D_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the data matrix\"\"\" return np . array ([ self . n_samples , self . n_linear_terms + self . n_quadratic_inputs ]) @property def Res_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the right-hand side residual matrix\"\"\" return np . array ([ self . n_samples , self . 
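The O_hat property shown above simply stacks whichever operator blocks have been identified. A small sketch of what it returns for a forcing-free model (array sizes are assumptions):

import numpy as np

c_hat = np.zeros((3, 1))                     # bias column
A_hat = np.zeros((3, 3))                     # linear operator
H_hat = np.zeros((3, 6))                     # quadratic operator; B_hat is None without forcing

valid = [m for m in [c_hat, A_hat, H_hat, None] if m is not None]
O_hat = np.hstack(valid)                     # shape (3, 10): all identified coefficients side by side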
n_outputs ]) @property def m_indices ( self ) -> list : \"\"\"Indices for the non-repeated observables in the Kronecker product output \"\"\" return np . vstack ([ self . i_u , self . j_u ]) . T . tolist () @property def solver_nature ( self ) -> str : \"\"\"It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: str: the solver classification \"\"\" if self . solver == \"pinv\" : return \"lazy\" else : return \"memory\" # Splitting the global solution into corresponding operators def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . T # Setting up model parameters def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . items (): setattr ( self , key , value ) @property def check_fits_in_memory ( self ) -> str : \"\"\"It checks if the data matrices, D and Res_matrix, can fit on memory Returns: str: the method for dealing with the data matrix, 'batch- wise' or 'global' \"\"\" total_size = np . prod ( self . D_matrix_dim ) + np . prod ( self . Res_matrix_dim ) item_size = np . array ([ 0 ]) . astype ( \"float64\" ) . itemsize allocated_memory = total_size * item_size available_memory = psutil . virtual_memory () . available if allocated_memory >= available_memory : print ( \"The data matrices does not fit in memory. Using batchwise process.\" ) return \"batchwise\" else : print ( \"The data matrices fits in memory.\" ) return \"global\" # It checks if a matrix is symmetric def _is_symmetric ( self , matrix : np . ndarray = None ) -> bool : \"\"\"It checks if the system matrix is symmetric Args: matrix (np.ndarray): the global system matrix Returns: bool: Is the matrix symmetric ? True or False \"\"\" return np . array_equal ( matrix , matrix . T ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"Kronecker product between two arrays Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . 
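The Kronecker helpers above exploit the symmetry of the row-wise outer product and keep only its non-repeated entries. A self-contained NumPy sketch of that quadratic-feature construction (sizes are illustrative):

import numpy as np

n_samples, n_inputs = 5, 3                   # assumed sizes
a = np.random.rand(n_samples, n_inputs)

kron = np.einsum("bi,bj->bij", a, a)         # row-wise outer product, as in _kronecker_product
i_u, j_u = np.triu_indices(n_inputs)         # upper-triangular (non-repeated) indices
quadratic_features = kron[:, i_u, j_u]       # shape (n_samples, n_inputs * (n_inputs + 1) // 2)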
shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Kronecker product augmented using extra variables (such as forcing terms) def _augmented_kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"Kronecker product between two arrays with self products for a and b Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" ab = np . concatenate ([ a , b ], axis =- 1 ) kron_ab = self . _kronecker_product ( a = ab , b = ab ) return kron_ab # Kronecker product for the variables themselves def _simple_kronecker_product ( self , a : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Kronecker product with a=b Args: a (np;ndarray): first element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" kron_aa = self . _kronecker_product ( a = a , b = a ) return kron_aa # Serially constructing operators def _serial_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a serial way Args: input_chunks (List[np.ndarray]): list of input data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" for ii , ( i_chunk , t_chunk , f_chunk ) in enumerate ( zip ( input_chunks , target_chunks , forcing_chunks ) ): sys . stdout . write ( \" \\r Processing chunk {} of {} \" . format ( ii + 1 , len ( input_chunks )) ) sys . stdout . flush () D_o_ii , R_matrix_ii = self . _construct_operators ( input_data = i_chunk , target_data = t_chunk , forcing_data = f_chunk ) D_o += D_o_ii R_matrix += R_matrix_ii return D_o , R_matrix # Parallely constructing operators def _parallel_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a parallel way Args: input_chunks (List[np.ndarray]): list of input data chunks forcing_chunks (List[np.ndarray]): list of forcing data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" # All the datasets list must have the same length in order to allow the compatibility and the partitions # between workers. assert len ( input_chunks ) == len ( target_chunks ) == len ( forcing_chunks ), ( \"All the list must have the same\" \"length, but received \" f \" { len ( input_chunks ) } , \" f \" { len ( target_chunks ) } and\" f \" { len ( forcing_chunks ) } \" ) keys = list () comm = MPI . COMM_WORLD rank = comm . 
Get_rank () n_chunks = len ( input_chunks ) if rank == 0 : for batch_id in range ( n_chunks ): print ( \"Preparing the batch {} \" . format ( batch_id )) keys . append ( f \"batch_ { batch_id } \" ) input_chunks = comm . bcast ( input_chunks , root = 0 ) target_chunks = comm . bcast ( target_chunks , root = 0 ) forcing_chunks = comm . bcast ( forcing_chunks , root = 0 ) keys = comm . bcast ( keys , root = 0 ) comm . barrier () kwargs = { \"input_chunks\" : input_chunks , \"target_chunks\" : target_chunks , \"forcing_chunks\" : forcing_chunks , \"key\" : keys , } # Pipeline for executing MPI jobs for independent sub-processes mpi_run = PipelineMPI ( exec = self . _parallel_exec_wrapper , collect = True , show_log = self . show_log ) # Fitting the model instances in parallel mpi_run . run ( kwargs = kwargs ) # When MPI finishes a run it outputs a dictionary containing status_dict the # partial result of each worker if mpi_run . success : out = mpi_run . status_dict values = out . values () # Each field in the output dictionary contains a tuple (D_0, R_matrix) # with the partial values of the OpInf system matrices D_o = sum ([ v [ 0 ] for v in values ]) R_matrix = sum ([ v [ 1 ] for v in values ]) self . success = True else : self . continuing = 0 return D_o , R_matrix # Wrapper for the independent parallel process def _parallel_exec_wrapper ( self , input_chunks : np . ndarray = None , target_chunks : np . ndarray = None , forcing_chunks : list = None , key : str = None , ) -> dict : D_o_ii , R_matrix_ii = self . _construct_operators ( input_data = input_chunks , target_data = target_chunks , forcing_data = forcing_chunks , ) return { key : [ D_o_ii , R_matrix_ii ]} def _generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case # The field variables quadratic terms are used anyway. n_samples = input_data . shape [ 0 ] quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) # Matrix used for including constant terms in the operator expression unitary_matrix = self . bias_rescale * np . ones (( n_samples , 1 )) # Known data matrix (D) if forcing_data is not None : # Constructing D using purely linear forcing terms D = np . hstack ( [ unitary_matrix , input_data , forcing_data , quadratic_input_data ] ) else : D = np . hstack ([ unitary_matrix , input_data , quadratic_input_data ]) # Target data Res_matrix = target_data . T return D , Res_matrix # Creating datasets on disk with lazy access def _lazy_generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , save_path : str = None , batch_size : int = None , ) -> ( h5py . Dataset , h5py . Dataset , List [ slice ]): def batch_forcing ( batch : np . ndarray = None ) -> np . ndarray : return forcing_data [ batch ] def pass_forcing ( * args ) -> np . ndarray : return None if forcing_data is None : handle_forcing = pass_forcing else : handle_forcing = batch_forcing if save_path is None : save_path = self . tmp_data_path filename = os . path . join ( save_path , \"data_matrices.hdf5\" ) f = h5py . File ( filename , mode = \"w\" ) Ddset = f . create_dataset ( \"D\" , shape = tuple ( self . D_matrix_dim ), dtype = \"f\" ) Rdset = f . 
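In plain NumPy terms, _generate_data_matrices assembles the known data matrix with the block structure [bias | linear | forcing | quadratic]. A sketch of the linear-forcing case, using assumed small arrays:

import numpy as np

n_samples, n_inputs, n_forcing = 6, 3, 2     # assumed sizes
X = np.random.rand(n_samples, n_inputs)      # field variables
F = np.random.rand(n_samples, n_forcing)     # forcing terms
Y = np.random.rand(n_samples, n_inputs)      # target data (e.g. time derivatives)
bias_rescale = 1.0

i_u, j_u = np.triu_indices(n_inputs)
X_quad = np.einsum("bi,bj->bij", X, X)[:, i_u, j_u]

ones = bias_rescale * np.ones((n_samples, 1))
D = np.hstack([ones, X, F, X_quad])          # known data matrix D
Res_matrix = Y.T                             # right-hand side, as in the source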
create_dataset ( \"Res_matrix\" , shape = tuple ( self . Res_matrix_dim ), dtype = \"f\" ) max_batches = int ( self . n_samples / batch_size ) batches = [ slice ( item [ 0 ], item [ - 1 ]) for item in np . array_split ( np . arange ( 0 , self . n_samples , 1 ), max_batches ) ] for batch in batches : # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data [ batch ], target_data = target_data [ batch ], forcing_data = handle_forcing ( batch ), ) Ddset [ batch ] = D Rdset [ batch ] = Res_matrix . T return Ddset , Rdset , batches , filename # Direct construction def _construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data ) # Constructing the data-driven component of the left operator D_o = D . T @ D # Constructing the right residual matrix R_matrix = D . T @ Res_matrix . T return D_o , R_matrix # Operators can be constructed incrementally when the dimensions are too large to # fit in common RAM. It also can be parallelized without major issues def _incremental_construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , ) -> ( np . ndarray , np . ndarray ): D_o = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_linear_terms + self . n_quadratic_inputs , ) ) R_matrix = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_outputs ) ) n_samples = input_data . shape [ 0 ] n_chunks = int ( n_samples / batch_size ) input_chunks = np . array_split ( input_data , n_chunks , axis = 0 ) target_chunks = np . array_split ( target_data , n_chunks , axis = 0 ) if forcing_data is not None : forcing_chunks = np . array_split ( forcing_data , n_chunks , axis = 0 ) else : forcing_chunks = n_chunks * [ None ] # The incremental dispatcher can be serial or parallel. D_o , R_matrix = self . dispatcher ( input_chunks = input_chunks , target_chunks = target_chunks , forcing_chunks = forcing_chunks , D_o = D_o , R_matrix = R_matrix , ) return D_o , R_matrix def _builtin_jacobian ( self , x ): return self . A_hat + ( self . K_op @ x . T ) def _external_jacobian ( self , x ): return self . jacobian_op ( x ) def _get_H_hat_column_position ( self , i : int , j : int ) -> Union [ int , None ]: jj = j - i return int (( i / 2 ) * ( 2 * self . n_inputs + 1 - i ) + jj ) def _define_H_hat_coefficient_function ( self , k : int , l : int , n : int , m : int ): if m is not None : H_coeff = self . H_hat [ k , m ] else : H_coeff = 0 if n == l : H_term = 2 * H_coeff else : H_term = H_coeff self . K_op [ k , l , n ] = H_term # Constructing a tensor for evaluating Jacobians def construct_K_op ( self , op : callable = None ) -> None : # Vector versions of the index functions get_H_hat_column_position = np . vectorize ( self . _get_H_hat_column_position ) define_H_hat_coefficient_function = np . vectorize ( self . _define_H_hat_coefficient_function ) if hasattr ( self , \"n_outputs\" ) is False : self . n_outputs = self . n_inputs if op is None : self . K_op = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) K = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) for k in range ( self . 
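_construct_operators reduces the least-squares problem to its normal equations, which is what makes the chunk-wise accumulation performed by the serial, incremental and MPI dispatchers possible: the per-batch contributions simply sum. A minimal sketch with assumed shapes:

import numpy as np

D = np.random.rand(6, 10)                    # data matrix for one batch (assumed shape)
Res_matrix = np.random.rand(3, 6)            # corresponding residual matrix

D_o = D.T @ D                                # left-hand operator contribution
R_matrix = D.T @ Res_matrix.T                # right-hand residual contribution
# Summing D_o and R_matrix over all batches reproduces the single global evaluation.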
n_outputs ): K [ k , ... ] = k K = K . astype ( int ) ll = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) nn = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) L , N = np . meshgrid ( ll , nn , indexing = \"ij\" ) M_ = get_H_hat_column_position ( L , N ) M_u = np . triu ( M_ ) M = ( M_u + M_u . T - M_u . diagonal () * np . eye ( self . n_inputs )) . astype ( int ) define_H_hat_coefficient_function ( K , L , N , M ) self . jacobian = self . _builtin_jacobian else : self . jacobian_op = op self . jacobian = self . _external_jacobian # Constructing the basic setup def construct ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ) -> None : # Collecting information dimensional information from the datasets if ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == True ): assert len ( input_data . shape ) == len ( target_data . shape ) == 2 , ( \"The input and target data, \" \"must be two-dimensional but received shapes\" f \" { input_data . shape } and { target_data . shape } \" ) self . n_samples = input_data . shape [ 0 ] # When there are forcing variables there are extra operators in the model if self . forcing is not None : assert ( forcing_data is not None ), \"If the forcing terms are used, forcing data must be provided.\" assert len ( forcing_data . shape ) == 2 , ( \"The forcing data must be two-dimensional,\" f \" but received shape { forcing_data . shape } \" ) assert ( input_data . shape [ 0 ] == target_data . shape [ 0 ] == forcing_data . shape [ 0 ] ), ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } , { target_data . shape [ 0 ] } and { forcing_data . shape [ 0 ] } .\" ) self . n_forcing_inputs = forcing_data . shape [ 1 ] # For no forcing cases, the classical form is adopted else : print ( \"Forcing terms are not being used.\" ) assert input_data . shape [ 0 ] == target_data . shape [ 0 ], ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } and { target_data . shape [ 0 ] } \" ) # Number of inputs or degrees of freedom self . n_inputs = input_data . shape [ 1 ] self . n_outputs = target_data . shape [ 1 ] # When no dataset is provided to fit, it is necessary directly setting up the dimension values elif ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == False ): assert self . n_inputs != None and self . n_outputs != None , ( \"It is necessary to provide some\" \" value to n_inputs and n_outputs\" ) else : raise Exception ( \"There is no way for executing the system construction\" \" if no dataset or dimension is provided.\" ) # Defining parameters for the Kronecker product if ( self . forcing is None ) or ( self . forcing == \"linear\" ): # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] # When the forcing interaction is 'nonlinear', there operator H_hat is extended elif self . forcing == \"nonlinear\" : # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs + self . n_forcing_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] else : print ( f \"The option { self . forcing } is not allowed for the forcing kind.\" ) # Number of linear terms if forcing_data is not None : self . n_forcing_inputs = forcing_data . 
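Once construct_K_op has assembled the third-order tensor K_op, the built-in Jacobian is the linear operator plus a state-dependent quadratic correction. A rough sketch of that evaluation for a single state vector; the sizes are assumptions and the tensor here is random rather than the one construct_K_op would actually build:

import numpy as np

n = 3                                        # assumed number of state variables
A_hat = np.random.rand(n, n)                 # linear operator
K_op = np.random.rand(n, n, n)               # stand-in for the tensor built by construct_K_op
x = np.random.rand(n)                        # state at which the Jacobian is evaluated

jacobian = A_hat + K_op @ x                  # mirrors OpInf._builtin_jacobian for a 1-D state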
shape [ 1 ] self . n_linear_terms = 1 + self . n_inputs + self . n_forcing_inputs else : self . n_linear_terms = 1 + self . n_inputs self . raw_model = False # Evaluating the model operators def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . lambda_linear Lambda [ self . n_linear_terms :] = self . lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . 
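The in-memory branch of fit solves a Tikhonov-regularized system built from the accumulated operators. A compact sketch of that solve, with assumed dimensions and arbitrary regularization weights:

import numpy as np
from scipy.linalg import solve

n_linear, n_quad, n_outputs = 4, 6, 3        # assumed dimensions
M = np.random.rand(n_linear + n_quad, n_linear + n_quad)
D_o = M @ M.T                                # symmetric stand-in for the accumulated D_o
R_matrix = np.random.rand(n_linear + n_quad, n_outputs)

Lambda = np.ones(n_linear + n_quad)
Lambda[:n_linear] = 1e-3                     # lambda_linear (assumed value)
Lambda[n_linear:] = 1e-2                     # lambda_quadratic (assumed value)
Gamma = Lambda * np.eye(n_linear + n_quad)

L_operator = D_o + Gamma.T @ Gamma           # regularized left-hand operator
solution = solve(L_operator, R_matrix, assume_a="sym")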
_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" ) # Making residual evaluations using the trained operator without forcing terms def _eval ( self , input_data : np . ndarray = None ) -> np . ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += self . c_hat . T return output # Making residual evaluations using the trained operator with forcing terms def _eval_forcing ( self , input_data : np . ndarray = None , forcing_data : np . ndarray = None ) -> np . ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += forcing_data @ self . B_hat . T output += self . c_hat . T return output def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . eval_op ( input_data = input_data , ** kwargs ) # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) # Saving to disk a lean version of the model def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . 
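After the operators are set, evaluation is just the sum of the bias, linear and quadratic contributions (plus a forcing term when B_hat exists). A sketch of the forcing-free case with assumed shapes and random operators:

import numpy as np

n_samples, n_inputs = 4, 3                   # assumed sizes
X = np.random.rand(n_samples, n_inputs)

i_u, j_u = np.triu_indices(n_inputs)
X_quad = np.einsum("bi,bj->bij", X, X)[:, i_u, j_u]

c_hat = np.random.rand(n_inputs, 1)              # bias
A_hat = np.random.rand(n_inputs, n_inputs)       # linear operator
H_hat = np.random.rand(n_inputs, i_u.shape[0])   # quadratic operator

output = X @ A_hat.T + X_quad @ H_hat.T + c_hat.T   # as in OpInf._eval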
join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) D_matrix_dim : np . ndarray property # The dimension of the data matrix O_hat : np . ndarray property # The concatenation of all the coefficients matrices Res_matrix_dim : np . ndarray property # The dimension of the right-hand side residual matrix check_fits_in_memory : str property # It checks if the data matrices, D and Res_matrix, can fit on memory Returns: Name Type Description str str the method for dealing with the data matrix, 'batch- str wise' or 'global' m_indices : list property # Indices for the non-repeated observables in the Kronecker product output solver_nature : str property # It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: Name Type Description str str the solver classification __init__ ( forcing = None , bias_rescale = 1 , solver = 'lstsq' , parallel = None , show_log = False , engine = 'numpy' ) # Operator Inference (OpInf) Parameters: Name Type Description Default forcing str the kind of forcing to be used, 'linear' or 'nonlinear' None bias_rescale float factor for rescaling the linear coefficients (c_hat) 1 solver Union [ str , callable ] solver to be used for solving the global system, e. g. 'lstsq'. 'lstsq' parallel str the kind of parallelism to be used (currently, 'mpi' or None) None engine str the engine to be used for constructing the global system (currently just 'numpy') 'numpy' Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . 
_parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . tmp_data_path = \"/tmp\" eval ( input_data = None , ** kwargs ) # Evaluating using the trained model Parameters: Name Type Description Default input_data ndarray array containing the input data None Returns: Type Description ndarray np.ndarray: output evaluation using the trained model Source code in simulai/regression/_opinf.py 952 953 954 955 956 957 958 959 960 961 962 def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . eval_op ( input_data = input_data , ** kwargs ) fit ( input_data = None , target_data = None , forcing_data = None , batch_size = None , Lambda = None , continuing = True , fit_partial = False , force_lazy_access = False , k_svd = None , save_path = None ) # Solving an Operator Inference system from large dataset Parameters: Name Type Description Default input_data ndarray dataset for the input data None target_data ndarray dataset for the target data None forcing_data ndarray dataset for the forcing data None batch_size int size of the batch used for creating the global system matrices None Lambda ndarray customized regularization matrix None Source code in simulai/regression/_opinf.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . 
ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . lambda_linear Lambda [ self . n_linear_terms :] = self . lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . 
T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" ) lean_save ( save_path = None , model_name = None ) # Lean saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) save ( save_path = None , model_name = None ) # Complete saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) set ( ** kwargs ) # Setting up extra parameters (as regularization terms) Parameters: Name Type Description Default **kwargs dict dictionary containing extra parameters {} Returns: Type Description nothing Source code in simulai/regression/_opinf.py 206 207 208 209 210 211 212 213 214 215 216 217 def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . 
items (): setattr ( self , key , value ) set_operators ( global_matrix = None ) # Setting up each operator using the global system solution Parameters: Name Type Description Default global_matrix ndarray the solution of the global system None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . T","title":"Simulai opinf"},{"location":"simulai_regression/simulai_opinf/#opinf","text":"","title":"OpInf"},{"location":"simulai_regression/simulai_opinf/#opinf_1","text":"Source code in simulai/regression/_opinf.py 44 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 
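Putting the methods documented above together, a hedged end-to-end usage sketch follows. It assumes OpInf is importable from simulai.regression (consistent with the module path shown), and the snapshot matrix, its time derivative and the regularization weights are placeholders:

import numpy as np
from simulai.regression import OpInf

X = np.random.rand(200, 5)                   # placeholder state snapshots
dX_dt = np.gradient(X, axis=0)               # placeholder time derivatives

model = OpInf(bias_rescale=1, solver="lstsq")
model.set(lambda_linear=1e-3, lambda_quadratic=1e-3)   # regularization terms, via set()
model.fit(input_data=X, target_data=dX_dt)

prediction = model.eval(input_data=X)        # evaluate the fitted operators
model.save(save_path="/tmp", model_name="opinf_example")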
561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 class OpInf : def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . _parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . 
n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . tmp_data_path = \"/tmp\" # Matrix containing all the model parameters @property def O_hat ( self ) -> np . ndarray : \"\"\"The concatenation of all the coefficients matrices\"\"\" valid = [ m for m in [ self . c_hat , self . A_hat , self . H_hat , self . B_hat ] if m is not None ] return np . hstack ( valid ) @property def D_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the data matrix\"\"\" return np . array ([ self . n_samples , self . n_linear_terms + self . n_quadratic_inputs ]) @property def Res_matrix_dim ( self ) -> np . ndarray : \"\"\"The dimension of the right-hand side residual matrix\"\"\" return np . array ([ self . n_samples , self . n_outputs ]) @property def m_indices ( self ) -> list : \"\"\"Indices for the non-repeated observables in the Kronecker product output \"\"\" return np . vstack ([ self . i_u , self . j_u ]) . T . tolist () @property def solver_nature ( self ) -> str : \"\"\"It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: str: the solver classification \"\"\" if self . solver == \"pinv\" : return \"lazy\" else : return \"memory\" # Splitting the global solution into corresponding operators def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . T # Setting up model parameters def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . items (): setattr ( self , key , value ) @property def check_fits_in_memory ( self ) -> str : \"\"\"It checks if the data matrices, D and Res_matrix, can fit on memory Returns: str: the method for dealing with the data matrix, 'batch- wise' or 'global' \"\"\" total_size = np . prod ( self . D_matrix_dim ) + np . prod ( self . Res_matrix_dim ) item_size = np . array ([ 0 ]) . astype ( \"float64\" ) . itemsize allocated_memory = total_size * item_size available_memory = psutil . virtual_memory () . available if allocated_memory >= available_memory : print ( \"The data matrices does not fit in memory. 
Using batchwise process.\" ) return \"batchwise\" else : print ( \"The data matrices fits in memory.\" ) return \"global\" # It checks if a matrix is symmetric def _is_symmetric ( self , matrix : np . ndarray = None ) -> bool : \"\"\"It checks if the system matrix is symmetric Args: matrix (np.ndarray): the global system matrix Returns: bool: Is the matrix symmetric ? True or False \"\"\" return np . array_equal ( matrix , matrix . T ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"Kronecker product between two arrays Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Kronecker product augmented using extra variables (such as forcing terms) def _augmented_kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"Kronecker product between two arrays with self products for a and b Args: a (np.ndarray): first element of the Kronecker product b (np.ndarray): second element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" ab = np . concatenate ([ a , b ], axis =- 1 ) kron_ab = self . _kronecker_product ( a = ab , b = ab ) return kron_ab # Kronecker product for the variables themselves def _simple_kronecker_product ( self , a : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Kronecker product with a=b Args: a (np;ndarray): first element of the Kronecker product Returns: np.ndarray: the result of the kronecker product \"\"\" kron_aa = self . _kronecker_product ( a = a , b = a ) return kron_aa # Serially constructing operators def _serial_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a serial way Args: input_chunks (List[np.ndarray]): list of input data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" for ii , ( i_chunk , t_chunk , f_chunk ) in enumerate ( zip ( input_chunks , target_chunks , forcing_chunks ) ): sys . stdout . write ( \" \\r Processing chunk {} of {} \" . format ( ii + 1 , len ( input_chunks )) ) sys . stdout . flush () D_o_ii , R_matrix_ii = self . 
_construct_operators ( input_data = i_chunk , target_data = t_chunk , forcing_data = f_chunk ) D_o += D_o_ii R_matrix += R_matrix_ii return D_o , R_matrix # Parallely constructing operators def _parallel_operators_construction_dispatcher ( self , input_chunks : list = None , target_chunks : list = None , forcing_chunks : list = None , D_o : np . ndarray = None , R_matrix : np . ndarray = None , ) -> ( np . ndarray , np . ndarray ): \"\"\"Dispatching the batch-wise global data matrix evaluation in a parallel way Args: input_chunks (List[np.ndarray]): list of input data chunks forcing_chunks (List[np.ndarray]): list of forcing data chunks target_chunks (List[np.ndarray]): list of target data chunks D_o (np.ndarray): pre-allocated global matrix used for receiving the chunk-wise evaluation R_matrix (np.ndarray): pre-allocated residual matrix used for receiving the chunk-wise evaluation Returns: (np.ndarray, np.ndarray): the pair (data_matrix, residual_matrix) evaluated for all the chunks/batches \"\"\" # All the datasets list must have the same length in order to allow the compatibility and the partitions # between workers. assert len ( input_chunks ) == len ( target_chunks ) == len ( forcing_chunks ), ( \"All the list must have the same\" \"length, but received \" f \" { len ( input_chunks ) } , \" f \" { len ( target_chunks ) } and\" f \" { len ( forcing_chunks ) } \" ) keys = list () comm = MPI . COMM_WORLD rank = comm . Get_rank () n_chunks = len ( input_chunks ) if rank == 0 : for batch_id in range ( n_chunks ): print ( \"Preparing the batch {} \" . format ( batch_id )) keys . append ( f \"batch_ { batch_id } \" ) input_chunks = comm . bcast ( input_chunks , root = 0 ) target_chunks = comm . bcast ( target_chunks , root = 0 ) forcing_chunks = comm . bcast ( forcing_chunks , root = 0 ) keys = comm . bcast ( keys , root = 0 ) comm . barrier () kwargs = { \"input_chunks\" : input_chunks , \"target_chunks\" : target_chunks , \"forcing_chunks\" : forcing_chunks , \"key\" : keys , } # Pipeline for executing MPI jobs for independent sub-processes mpi_run = PipelineMPI ( exec = self . _parallel_exec_wrapper , collect = True , show_log = self . show_log ) # Fitting the model instances in parallel mpi_run . run ( kwargs = kwargs ) # When MPI finishes a run it outputs a dictionary containing status_dict the # partial result of each worker if mpi_run . success : out = mpi_run . status_dict values = out . values () # Each field in the output dictionary contains a tuple (D_0, R_matrix) # with the partial values of the OpInf system matrices D_o = sum ([ v [ 0 ] for v in values ]) R_matrix = sum ([ v [ 1 ] for v in values ]) self . success = True else : self . continuing = 0 return D_o , R_matrix # Wrapper for the independent parallel process def _parallel_exec_wrapper ( self , input_chunks : np . ndarray = None , target_chunks : np . ndarray = None , forcing_chunks : list = None , key : str = None , ) -> dict : D_o_ii , R_matrix_ii = self . _construct_operators ( input_data = input_chunks , target_data = target_chunks , forcing_data = forcing_chunks , ) return { key : [ D_o_ii , R_matrix_ii ]} def _generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case # The field variables quadratic terms are used anyway. n_samples = input_data . 
shape [ 0 ] quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) # Matrix used for including constant terms in the operator expression unitary_matrix = self . bias_rescale * np . ones (( n_samples , 1 )) # Known data matrix (D) if forcing_data is not None : # Constructing D using purely linear forcing terms D = np . hstack ( [ unitary_matrix , input_data , forcing_data , quadratic_input_data ] ) else : D = np . hstack ([ unitary_matrix , input_data , quadratic_input_data ]) # Target data Res_matrix = target_data . T return D , Res_matrix # Creating datasets on disk with lazy access def _lazy_generate_data_matrices ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , save_path : str = None , batch_size : int = None , ) -> ( h5py . Dataset , h5py . Dataset , List [ slice ]): def batch_forcing ( batch : np . ndarray = None ) -> np . ndarray : return forcing_data [ batch ] def pass_forcing ( * args ) -> np . ndarray : return None if forcing_data is None : handle_forcing = pass_forcing else : handle_forcing = batch_forcing if save_path is None : save_path = self . tmp_data_path filename = os . path . join ( save_path , \"data_matrices.hdf5\" ) f = h5py . File ( filename , mode = \"w\" ) Ddset = f . create_dataset ( \"D\" , shape = tuple ( self . D_matrix_dim ), dtype = \"f\" ) Rdset = f . create_dataset ( \"Res_matrix\" , shape = tuple ( self . Res_matrix_dim ), dtype = \"f\" ) max_batches = int ( self . n_samples / batch_size ) batches = [ slice ( item [ 0 ], item [ - 1 ]) for item in np . array_split ( np . arange ( 0 , self . n_samples , 1 ), max_batches ) ] for batch in batches : # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data [ batch ], target_data = target_data [ batch ], forcing_data = handle_forcing ( batch ), ) Ddset [ batch ] = D Rdset [ batch ] = Res_matrix . T return Ddset , Rdset , batches , filename # Direct construction def _construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ** kwargs , ) -> ( np . ndarray , np . ndarray ): # Generating the data-driven matrices D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data ) # Constructing the data-driven component of the left operator D_o = D . T @ D # Constructing the right residual matrix R_matrix = D . T @ Res_matrix . T return D_o , R_matrix # Operators can be constructed incrementally when the dimensions are too large to # fit in common RAM. It also can be parallelized without major issues def _incremental_construct_operators ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , ) -> ( np . ndarray , np . ndarray ): D_o = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_linear_terms + self . n_quadratic_inputs , ) ) R_matrix = np . zeros ( ( self . n_linear_terms + self . n_quadratic_inputs , self . n_outputs ) ) n_samples = input_data . shape [ 0 ] n_chunks = int ( n_samples / batch_size ) input_chunks = np . array_split ( input_data , n_chunks , axis = 0 ) target_chunks = np . array_split ( target_data , n_chunks , axis = 0 ) if forcing_data is not None : forcing_chunks = np . 
array_split ( forcing_data , n_chunks , axis = 0 ) else : forcing_chunks = n_chunks * [ None ] # The incremental dispatcher can be serial or parallel. D_o , R_matrix = self . dispatcher ( input_chunks = input_chunks , target_chunks = target_chunks , forcing_chunks = forcing_chunks , D_o = D_o , R_matrix = R_matrix , ) return D_o , R_matrix def _builtin_jacobian ( self , x ): return self . A_hat + ( self . K_op @ x . T ) def _external_jacobian ( self , x ): return self . jacobian_op ( x ) def _get_H_hat_column_position ( self , i : int , j : int ) -> Union [ int , None ]: jj = j - i return int (( i / 2 ) * ( 2 * self . n_inputs + 1 - i ) + jj ) def _define_H_hat_coefficient_function ( self , k : int , l : int , n : int , m : int ): if m is not None : H_coeff = self . H_hat [ k , m ] else : H_coeff = 0 if n == l : H_term = 2 * H_coeff else : H_term = H_coeff self . K_op [ k , l , n ] = H_term # Constructing a tensor for evaluating Jacobians def construct_K_op ( self , op : callable = None ) -> None : # Vector versions of the index functions get_H_hat_column_position = np . vectorize ( self . _get_H_hat_column_position ) define_H_hat_coefficient_function = np . vectorize ( self . _define_H_hat_coefficient_function ) if hasattr ( self , \"n_outputs\" ) is False : self . n_outputs = self . n_inputs if op is None : self . K_op = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) K = np . zeros (( self . n_outputs , self . n_inputs , self . n_inputs )) for k in range ( self . n_outputs ): K [ k , ... ] = k K = K . astype ( int ) ll = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) nn = np . arange ( 0 , self . n_inputs , 1 ) . astype ( int ) L , N = np . meshgrid ( ll , nn , indexing = \"ij\" ) M_ = get_H_hat_column_position ( L , N ) M_u = np . triu ( M_ ) M = ( M_u + M_u . T - M_u . diagonal () * np . eye ( self . n_inputs )) . astype ( int ) define_H_hat_coefficient_function ( K , L , N , M ) self . jacobian = self . _builtin_jacobian else : self . jacobian_op = op self . jacobian = self . _external_jacobian # Constructing the basic setup def construct ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , ) -> None : # Collecting information dimensional information from the datasets if ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == True ): assert len ( input_data . shape ) == len ( target_data . shape ) == 2 , ( \"The input and target data, \" \"must be two-dimensional but received shapes\" f \" { input_data . shape } and { target_data . shape } \" ) self . n_samples = input_data . shape [ 0 ] # When there are forcing variables there are extra operators in the model if self . forcing is not None : assert ( forcing_data is not None ), \"If the forcing terms are used, forcing data must be provided.\" assert len ( forcing_data . shape ) == 2 , ( \"The forcing data must be two-dimensional,\" f \" but received shape { forcing_data . shape } \" ) assert ( input_data . shape [ 0 ] == target_data . shape [ 0 ] == forcing_data . shape [ 0 ] ), ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } , { target_data . shape [ 0 ] } and { forcing_data . shape [ 0 ] } .\" ) self . n_forcing_inputs = forcing_data . shape [ 1 ] # For no forcing cases, the classical form is adopted else : print ( \"Forcing terms are not being used.\" ) assert input_data . shape [ 0 ] == target_data . 
shape [ 0 ], ( \"The number of samples is not the same for all the sets with\" f \" { input_data . shape [ 0 ] } and { target_data . shape [ 0 ] } \" ) # Number of inputs or degrees of freedom self . n_inputs = input_data . shape [ 1 ] self . n_outputs = target_data . shape [ 1 ] # When no dataset is provided to fit, it is necessary directly setting up the dimension values elif ( isinstance ( input_data , np . ndarray ) == isinstance ( target_data , np . ndarray ) == False ): assert self . n_inputs != None and self . n_outputs != None , ( \"It is necessary to provide some\" \" value to n_inputs and n_outputs\" ) else : raise Exception ( \"There is no way for executing the system construction\" \" if no dataset or dimension is provided.\" ) # Defining parameters for the Kronecker product if ( self . forcing is None ) or ( self . forcing == \"linear\" ): # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] # When the forcing interaction is 'nonlinear', there operator H_hat is extended elif self . forcing == \"nonlinear\" : # Getting the upper component indices of a symmetric matrix self . i_u , self . j_u = np . triu_indices ( self . n_inputs + self . n_forcing_inputs ) self . n_quadratic_inputs = self . i_u . shape [ 0 ] else : print ( f \"The option { self . forcing } is not allowed for the forcing kind.\" ) # Number of linear terms if forcing_data is not None : self . n_forcing_inputs = forcing_data . shape [ 1 ] self . n_linear_terms = 1 + self . n_inputs + self . n_forcing_inputs else : self . n_linear_terms = 1 + self . n_inputs self . raw_model = False # Evaluating the model operators def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . 
R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . lambda_linear Lambda [ self . n_linear_terms :] = self . lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" ) # Making residual evaluations using the trained operator without forcing terms def _eval ( self , input_data : np . ndarray = None ) -> np . ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += self . c_hat . T return output # Making residual evaluations using the trained operator with forcing terms def _eval_forcing ( self , input_data : np . ndarray = None , forcing_data : np . ndarray = None ) -> np . 
ndarray : # If forcing_data is None, the Kronecker product is applied just for the field # variables, thus reducing to the no forcing term case quadratic_input_data = self . kronecker_product ( a = input_data , b = forcing_data ) output = input_data @ self . A_hat . T output += quadratic_input_data @ self . H_hat . T output += forcing_data @ self . B_hat . T output += self . c_hat . T return output def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . eval_op ( input_data = input_data , ** kwargs ) # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) # Saving to disk a lean version of the model def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"OpInf"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.D_matrix_dim","text":"The dimension of the data matrix","title":"D_matrix_dim"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.O_hat","text":"The concatenation of all the coefficients matrices","title":"O_hat"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.Res_matrix_dim","text":"The dimension of the right-hand side residual matrix","title":"Res_matrix_dim"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.check_fits_in_memory","text":"It checks if the data matrices, D and Res_matrix, can fit on memory Returns: Name Type Description str str the method for dealing with the data matrix, 'batch- str wise' or 'global'","title":"check_fits_in_memory"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.m_indices","text":"Indices for the non-repeated observables in the Kronecker product output","title":"m_indices"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.solver_nature","text":"It classifies the solver used in 'lazy' (when data is stored on disk) and 'memory' (when data is all allocated in memory) Returns: Name Type Description str str the solver classification","title":"solver_nature"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.__init__","text":"Operator Inference (OpInf) Parameters: Name Type Description Default forcing str the kind of forcing to be used, 'linear' or 'nonlinear' None bias_rescale float factor for rescaling the linear coefficients (c_hat) 1 solver Union [ str , callable ] solver to be used for solving the global system, e. g. 'lstsq'. 
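The `construct`/`fit` logic listed above assembles the data matrix D = [1 | U | Q] (with Q the non-redundant quadratic terms), forms the normal-equation pair D_o = DᵀD and R = DᵀY, adds the regularization Γ = diag(Λ), and solves (D_o + ΓᵀΓ) Ôᵀ = R before splitting the solution into c_hat, A_hat and H_hat. The NumPy-only sketch below reproduces that algebra on toy data; it illustrates the math on assumed random arrays and does not call into the library:

```python
import numpy as np

rng = np.random.default_rng(0)
n_samples, n_inputs = 200, 3

U = rng.normal(size=(n_samples, n_inputs))   # state snapshots
Y = rng.normal(size=(n_samples, n_inputs))   # time-derivative targets

# Non-redundant quadratic terms: upper-triangular entries of the Kronecker product u (x) u
i_u, j_u = np.triu_indices(n_inputs)
Q = U[:, i_u] * U[:, j_u]                    # shape (n_samples, n_inputs*(n_inputs+1)/2)

# Data matrix D = [1 | U | Q] (no forcing in this toy case)
D = np.hstack([np.ones((n_samples, 1)), U, Q])

# Normal equations with Tikhonov regularization
D_o = D.T @ D
R = D.T @ Y
lambda_linear, lambda_quadratic = 1e-3, 1e-3
Lambda = np.concatenate(
    [np.full(1 + n_inputs, lambda_linear), np.full(len(i_u), lambda_quadratic)]
)
Gamma = np.diag(Lambda)

solution = np.linalg.lstsq(D_o + Gamma.T @ Gamma, R, rcond=None)[0]

# Splitting the global solution into operators, mirroring what set_operators() does
c_hat = solution[:1].T
A_hat = solution[1 : n_inputs + 1].T
H_hat = solution[n_inputs + 1 :].T
print(c_hat.shape, A_hat.shape, H_hat.shape)
```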
'lstsq' parallel str the kind of parallelism to be used (currently, 'mpi' or None) None engine str the engine to be used for constructing the global system (currently just 'numpy') 'numpy' Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 45 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , forcing : str = None , bias_rescale : float = 1 , solver : Union [ str , callable ] = \"lstsq\" , parallel : Union [ str , None ] = None , show_log : bool = False , engine : str = \"numpy\" , ) -> None : \"\"\"Operator Inference (OpInf) Args: forcing (str): the kind of forcing to be used, 'linear' or 'nonlinear' bias_rescale (float): factor for rescaling the linear coefficients (c_hat) solver (Union[str, callable]): solver to be used for solving the global system, e. g. 'lstsq'. parallel (str): the kind of parallelism to be used (currently, 'mpi' or None) engine (str): the engine to be used for constructing the global system (currently just 'numpy') Returns: nothing \"\"\" # forcing is chosen among (None, 'linear', 'nonlinear') self . forcing = forcing self . bias_rescale = bias_rescale self . solver = solver self . parallel = parallel self . show_log = show_log self . engine = engine if self . forcing is not None : self . eval_op = self . _eval_forcing else : self . eval_op = self . _eval if self . forcing == \"nonlinear\" : self . kronecker_product = self . _augmented_kronecker_product else : self . kronecker_product = self . _simple_kronecker_product if self . parallel == None : self . dispatcher = self . _serial_operators_construction_dispatcher elif self . parallel == \"mpi\" : if MPI_GLOBAL_AVAILABILITY == True : self . dispatcher = self . _parallel_operators_construction_dispatcher else : raise Exception ( \"Trying to execute a MPI job but there is no MPI distribution available.\" ) else : raise Exception ( f \"The option { self . parallel } for parallel is not valid. It must be None or mpi\" ) self . lambda_linear = 0 self . lambda_quadratic = 0 self . n_inputs = None self . n_outputs = None self . n_samples = None self . n_quadratic_inputs = None self . n_forcing_inputs = 0 self . jacobian = None self . jacobian_op = None self . D_o = None self . R_matrix = None # OpInf adjustable operators self . c_hat = None # Bias self . A_hat = None # Coefficients for the linear field variable terms self . H_hat = None # Coefficients for the nonlinear quadratic terms self . B_hat = None # Coefficients for the linear forcing terms self . success = None self . continuing = 1 self . raw_model = True self . tmp_data_path = \"/tmp\"","title":"__init__()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.eval","text":"Evaluating using the trained model Parameters: Name Type Description Default input_data ndarray array containing the input data None Returns: Type Description ndarray np.ndarray: output evaluation using the trained model Source code in simulai/regression/_opinf.py 952 953 954 955 956 957 958 959 960 961 962 def eval ( self , input_data : np . ndarray = None , ** kwargs ) -> np . ndarray : \"\"\"Evaluating using the trained model Args: input_data (np.ndarray): array containing the input data Returns: np.ndarray: output evaluation using the trained model \"\"\" return self . 
eval_op ( input_data = input_data , ** kwargs )","title":"eval()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.fit","text":"Solving an Operator Inference system from large dataset Parameters: Name Type Description Default input_data ndarray dataset for the input data None target_data ndarray dataset for the target data None forcing_data ndarray dataset for the forcing data None batch_size int size of the batch used for creating the global system matrices None Lambda ndarray customized regularization matrix None Source code in simulai/regression/_opinf.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 784 785 786 787 788 789 790 791 792 793 794 795 796 797 798 799 800 801 802 803 804 805 806 807 808 809 810 811 812 813 814 815 816 817 818 819 820 821 822 823 824 825 826 827 828 829 830 831 832 833 834 835 836 837 838 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , forcing_data : np . ndarray = None , batch_size : int = None , Lambda : np . ndarray = None , continuing : Optional [ bool ] = True , fit_partial : Optional [ bool ] = False , force_lazy_access : Optional [ bool ] = False , k_svd : Optional [ int ] = None , save_path : Optional [ str ] = None , ) -> None : \"\"\"Solving an Operator Inference system from large dataset Args: input_data (np.ndarray): dataset for the input data target_data (np.ndarray): dataset for the target data forcing_data (np.ndarray): dataset for the forcing data batch_size (int): size of the batch used for creating the global system matrices Lambda (np.ndarray): customized regularization matrix \"\"\" if type ( self . solver ) == str : self . construct ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) # Constructing the system operators if self . solver_nature == \"memory\" : # This operation can require a large memory footprint, so it also can be executed # in chunks and, eventually, in parallel. if isinstance ( batch_size , int ): construct_operators = self . _incremental_construct_operators else : construct_operators = self . _construct_operators if self . D_o is None and self . R_matrix is None : D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o = D_o self . R_matrix = R_matrix if ( type ( self . D_o ) == np . ndarray and type ( self . R_matrix ) == np . ndarray and fit_partial is True ): D_o , R_matrix = construct_operators ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , batch_size = batch_size , ) self . D_o += D_o self . R_matrix += R_matrix else : D_o = self . D_o R_matrix = self . R_matrix self . continuing = 1 # If just system matrices, D_o and R_matrix are desired, the execution can be interrupted # here. if self . continuing and continuing is not False : # Regularization operator if Lambda is None : Lambda = np . ones ( self . n_linear_terms + self . n_quadratic_inputs ) Lambda [: self . n_linear_terms ] = self . lambda_linear Lambda [ self . n_linear_terms :] = self . 
lambda_quadratic else : print ( \"Using an externally defined Lambda vector.\" ) Gamma = Lambda * np . eye ( self . n_linear_terms + self . n_quadratic_inputs ) # Left operator L_operator = D_o + Gamma . T @ Gamma # Solving the linear system via least squares print ( \"Solving linear system ...\" ) if self . _is_symmetric ( L_operator ) and self . solver is None : print ( \"L_operator is symmetric.\" ) solution = solve ( L_operator , R_matrix , assume_a = \"sym\" ) elif self . solver == \"pinv_close\" : D_o_pinv = np . linalg . pinv ( D_o ) solution = D_o_pinv @ R_matrix else : solution = np . linalg . lstsq ( L_operator , R_matrix , rcond = None )[ 0 ] # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) # It corresponds to the case 'lazy' in which data is temporally stored on disk. # In case of using the Moore-Penrose pseudo-inverse it is necessary # to store the entire data matrices in order to solve the undetermined system else : if self . check_fits_in_memory == \"global\" and force_lazy_access is False : D , Res_matrix = self . _generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , ) D_pinv = np . linalg . pinv ( D ) solution = D_pinv @ Res_matrix . T else : if force_lazy_access is True : print ( \"The batchwise execution is being forced.\" ) assert ( batch_size is not None ), f \"It is necessary to define batch_size but received { batch_size } .\" ( D , Res_matrix , batches , filename , ) = self . _lazy_generate_data_matrices ( input_data = input_data , target_data = target_data , forcing_data = forcing_data , save_path = save_path , batch_size = batch_size , ) if k_svd is None : k_svd = self . n_inputs pinv = CompressedPinv ( D = D , chunks = ( batch_size , self . n_inputs ), k = k_svd ) solution = pinv ( Y = Res_matrix , batches = batches ) # Removing the file stored in disk os . remove ( filename ) # Setting up the employed matrix operators self . set_operators ( global_matrix = solution ) elif callable ( self . solver ): warnings . warn ( \"Iterative solvers are not currently supported.\" ) warnings . warn ( \"Finishing fitting process without modifications.\" ) else : raise Exception ( f \"The option { type ( self . solver ) } is not suported. \\ it must be callable or str.\" ) print ( \"Fitting process concluded.\" )","title":"fit()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.lean_save","text":"Lean saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 def lean_save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Lean saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" # Parameters to be removed in a lean version of the model black_list = [ \"D_o\" , \"R_matrix\" ] path = os . path . join ( save_path , model_name + \".pkl\" ) self_copy = deepcopy ( self ) for item in black_list : del self_copy . __dict__ [ item ] try : with open ( path , \"wb\" ) as fp : pickle . dump ( self_copy , fp , protocol = 4 ) except Exception as e : print ( e , e . 
args )","title":"lean_save()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.save","text":"Complete saving Parameters: Name Type Description Default save_path str path to the saving directory None model_name str name for the model None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str): path to the saving directory model_name (str): name for the model Returns: nothing \"\"\" path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"save()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.set","text":"Setting up extra parameters (as regularization terms) Parameters: Name Type Description Default **kwargs dict dictionary containing extra parameters {} Returns: Type Description nothing Source code in simulai/regression/_opinf.py 206 207 208 209 210 211 212 213 214 215 216 217 def set ( self , ** kwargs ): \"\"\"Setting up extra parameters (as regularization terms) Args: **kwargs (dict): dictionary containing extra parameters Returns: nothing \"\"\" for key , value in kwargs . items (): setattr ( self , key , value )","title":"set()"},{"location":"simulai_regression/simulai_opinf/#simulai.regression.OpInf.set_operators","text":"Setting up each operator using the global system solution Parameters: Name Type Description Default global_matrix ndarray the solution of the global system None Returns: Type Description None nothing Source code in simulai/regression/_opinf.py 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def set_operators ( self , global_matrix : np . ndarray = None ) -> None : \"\"\"Setting up each operator using the global system solution Args: global_matrix (np.ndarray): the solution of the global system Returns: nothing \"\"\" if self . n_inputs == None and self . n_outputs == None : self . n_inputs = self . n_outputs = global_matrix . shape [ 1 ] if self . raw_model == True : self . construct () if self . forcing is not None : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . B_hat = global_matrix [ self . n_inputs + 1 : self . n_inputs + 1 + self . n_forcing_inputs ] . T self . H_hat = global_matrix [ self . n_inputs + 1 + self . n_forcing_inputs :] . T else : self . c_hat = global_matrix [: 1 ] . T self . A_hat = global_matrix [ 1 : self . n_inputs + 1 ] . T self . H_hat = global_matrix [ self . n_inputs + 1 :] . T","title":"set_operators()"},{"location":"simulai_rom/simulai_rom/","text":"red { color: red } simulai.rom # POD # Bases: ROM It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. 
The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that Source code in simulai/rom/_rom.py 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class POD ( ROM ): \"\"\"It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that \"\"\" name = \"pod\" def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . 
ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) ) __init__ ( config = None , svd_filter = None ) # Propor Orthogonal Decomposition Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . 
svd_filter = svd_filter fit ( data = None ) # Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) project ( data = None ) # Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) reconstruct ( projected_data = None ) # Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . 
shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) restore ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 323 324 325 326 327 328 329 330 331 332 333 334 335 336 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) ) save ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 309 310 311 312 313 314 315 316 317 318 319 320 321 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) IPOD # Bases: ROM Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. 
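Because IPOD delegates to scikit-learn's IncrementalPCA.partial_fit, the basis can be built chunk by chunk so the full snapshot matrix never has to sit in memory at once. A minimal sketch of that batch-wise workflow, again assuming the `simulai.rom` import path and using hypothetical chunk sizes:

```python
import numpy as np
from simulai.rom import IPOD   # assumed import path

rom = IPOD(config={"n_components": 10, "mean_component": True})

# Feed the snapshots in chunks; each call updates the running mean and the POD basis.
for _ in range(5):
    batch = np.random.rand(200, 256)   # hypothetical chunk, shape (batch_size, n_features)
    rom.fit(data=batch)

reduced = rom.project(data=np.random.rand(50, 256))
print(reduced.shape)   # expected (50, 10)
```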
Source code in simulai/rom/_rom.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 class IPOD ( ROM ): \"\"\"Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. \"\"\" name = \"ipod\" def __init__ ( self , config : dict = None , data_mean : np . ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . 
sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) ) __init__ ( config = None , data_mean = None , svd_filter = None ) # Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None data_mean ndarray pre-evaluated mean of the dataset (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 def __init__ ( self , config : dict = None , data_mean : np . 
ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter fit ( data = None ) # Output shape: (space_dimension, n_modes) Parameters: Name Type Description Default data ndarray (Default value = None) None Source code in simulai/rom/_rom.py 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy project ( data = None ) # Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 def project ( self , data : np . ndarray = None ) -> np . 
ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) reconstruct ( projected_data = None ) # Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) restore ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) ) save ( save_path = None , model_name = None ) # It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 501 502 503 504 505 506 507 508 509 510 511 512 513 514 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . 
path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) GPOD # Bases: ROM Source code in simulai/rom/_rom.py 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 class GPOD ( ROM ): def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . mask_array = None # It gets the positions related to the n maximum and n minimum values to be used # to locate sensors def _extrema ( self ): locations = list () n_modes = self . modes . shape [ 0 ] for mode_i in range ( n_modes ): n_sensors = self . sensors_distribution [ mode_i ] n_minimum = n_maximum = int ( n_sensors / 2 ) locations += self . modes [ mode_i ] . argsort ()[: n_minimum ] . tolist () locations += self . modes [ mode_i ] . argsort ()[ - n_maximum :] . tolist () return locations # The m dot product (a, b)_m = (m*a, m*b), in which m is a mask array def m_dot ( self , a , b , mask_array = None ): return ( mask_array * a ) . dot (( mask_array * b ) . T ) def fit ( self , data = None ): self . pca . fit ( data = data ) self . modes = self . pca . modes n_features = self . modes . shape [ 1 ] sensors_locations = self . placer () mask_array = np . zeros (( 1 , n_features )) mask_array [:, sensors_locations ] = 1 self . mask_array = mask_array self . M = self . m_dot ( self . modes , self . modes , mask_array = mask_array ) self . M_inv = np . linalg . inv ( self . M ) print ( f \"The condition number for the matrix M is { np . linalg . cond ( self . M ) } \" ) def project ( self , data = None ): data_til = self . mask_array * data f = self . m_dot ( data_til , self . modes , mask_array = self . mask_array ) return f @ self . M_inv . T def reconstruct ( self , projected_data = None ): return self . pca . 
reconstruct ( projected_data = projected_data ) __init__ ( pca_type = 'pod' , pca_config = None , config = None ) # GPOD Parameters: Name Type Description Default pca_type str the kind of PCA to be used (Default value = \"pod\") 'pod' pca_config (Default value = None) None config (Default value = None) None Source code in simulai/rom/_rom.py 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . mask_array = None HOSVD # Bases: ROM High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. Source code in simulai/rom/_rom.py 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 class HOSVD ( ROM ): \"\"\"High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. 
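The HOSVD class documented above takes one truncation rank per tensor direction and applies an SVD to each mode flattening of the input array. A minimal usage sketch follows, assuming the class is importable as `from simulai.rom import HOSVD` and using illustrative array shapes (neither the import path nor the shapes are stated on this page):

```python
import numpy as np

from simulai.rom import HOSVD  # assumed import path

# Synthetic third-order tensor standing in for, e.g., (time, x, y) snapshot data
data = np.random.rand(100, 32, 16)

# One truncation rank per tensor direction, in the same order as data.shape
hosvd = HOSVD(n_components=[10, 8, 4], engine="sklearn")

hosvd.fit(data=data)                        # one SVD per mode flattening, plus the core tensor S
reduced = hosvd.project(data=data)          # reduced core, shape (10, 8, 4)
restored = hosvd.reconstruct(data=reduced)  # back to shape (100, 32, 16), up to truncation error

print(reduced.shape, restored.shape)
```

According to the source listed here, passing `engine="dask"` switches the per-mode SVDs to `ParallelSVD` (dask's compressed SVD) and uses the `limit` argument when reshaping dask arrays, which is the intended path for data that does not fit in memory.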
\"\"\" name = \"hosvd\" def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" ) def _configure_SVD ( self ) -> Union [ List [ TruncatedSVD ], List [ ParallelSVD ]]: if self . engine == \"sklearn\" : return [ TruncatedSVD ( n_components = n ) for n in self . n_components ] elif self . engine == \"dask\" : return [ ParallelSVD ( n_components = n ) for n in self . n_components ] else : raise Exception ( f \"The engine { self . engine } is not supported, it must be in ['sklearn', 'dask'].\" ) def _set_components ( self ) -> None : for j , name in enumerate ( self . components_names ): setattr ( self , name . upper () + self . _comp_tag , self . U_list [ j ]) def _k_svd ( self , data : np . ndarray = None , k : int = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"SVD applied to the k-mode flattening Args: data (np.ndarray, optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: Left eigenvectors matrix U \"\"\" self . svd_classes [ k ] . fit ( data ) if self . engine == \"sklearn\" : s = self . svd_classes [ k ] . singular_values_ * np . eye ( self . n_components [ k ]) VT = self . svd_classes [ k ] . components_ SVT = s @ VT U = ( np . linalg . pinv ( SVT . T ) @ data . T ) . T else : U = getattr ( self . svd_classes [ k ], \"U\" ) return U def _k_flattening ( self , data : Union [ np . ndarray , da . core . Array ] = None , k : int = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"k-mode flattening Args: data (Union[np.ndarray, da.core.Array], optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: reshaped array of shape (n_1, n_2*n_3*...*n_n) \"\"\" sizelist = copy . deepcopy ( self . sizelist ) sizelist_collapsible = copy . deepcopy ( sizelist ) sizelist [ 0 ] = k sizelist [ k ] = 0 sizelist_collapsible . pop ( k ) collapsible_dims = np . prod ([ self . shape [ s ] for s in sizelist_collapsible ]) if isinstance ( data , da . core . Array ): return data . transpose ( sizelist ) . reshape ( ( - 1 , collapsible_dims ), limit = self . limit ) else : return data . transpose ( sizelist ) . reshape ( - 1 , collapsible_dims ) def fit ( self , data : Union [ np . ndarray , da . core . 
Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . _set_components () def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . transpose () # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . 
args ) __init__ ( n_components = None , components_names = None , engine = 'sklearn' , limit = '1 GiB' ) # Parameters: Name Type Description Default n_components List [ int ] list with the number of components for each direction (Default value = None) None components_names List [ str ] (Default value = None) None engine str (Default value = \"sklearn\") 'sklearn' limit str (Default value = \"1 GiB\") '1 GiB' Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" ) fit ( data = None ) # Executing High-Order SVD Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . 
_set_components () project ( data = None ) # Projecting using the SVD basis Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description Union [ ndarray , Array ] np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') Source code in simulai/rom/_rom.py 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S reconstruct ( data = None , replace_components = None ) # Reconstruction using the pre-existent basis Parameters: Name Type Description Default data Union [ ndarray , Array ] reduced array of shape (n_1', n_2', ..., None n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: Type Description Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] n_n) Source code in simulai/rom/_rom.py 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . transpose () save ( save_path = None , model_name = None ) # Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . 
join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) ParallelSVD # Bases: ROM Source code in simulai/rom/_rom.py 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 class ParallelSVD ( ROM ): name = \"parallel_svd\" def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . V = None def _chunk_size_condition ( self , size : int , chunk_size : int ) -> int : if size // chunk_size == 0 : return size else : return size // chunk_size def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: if self . chunks == None : chunks = [ self . _chunk_size_condition ( size , self . default_chunks_numbers [ j ]) for j , size in enumerate ( data . shape ) ] else : chunks = self . chunks if isinstance ( data , np . ndarray ): parallel_data = da . from_array ( data , chunks = chunks ) else : parallel_data = data U , s , V = da . linalg . svd_compressed ( parallel_data , k = self . n_components ) self . U = U self . s = s self . V = V __init__ ( n_components = None , chunks = None ) # Executing SVD using dask Parameters: Name Type Description Default n_components int (Default value = None) None chunks Tuple [ int ] (Default value = None) None Source code in simulai/rom/_rom.py 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . 
V = None QQM # Source code in simulai/rom/_rom.py 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 class QQM : def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"It executes a Kronecker dot between two arrays Args: a (np.ndarray, optional): left array (Default value = None) b (np.ndarray, optional): right (transposed) array (Default value = None) Returns: np.ndarray: the Kronecker output array \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Each batch in W has n_inputs*(n_inputs + 1)/2 columns def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . _kronecker_product ( a = data , b = data ) def fit ( self , input_data : np . 
ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" ) def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . valid_indices ] def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . V_bar def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args ) W_transform ( data = None ) # W_transform simply applied Kronecker product for data itself Parameters: Name Type Description Default data ndarray the data to be W-transformed (Default value = None) None Returns: Type Description ndarray np.ndarray: the Kronecker product between data and data ndarray np.ndarray: the Kronecker product between data and data ndarray itself Source code in simulai/rom/_rom.py 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . 
_kronecker_product ( a = data , b = data ) __init__ ( n_inputs = None , alpha_0 = None , sparsity_tol = 1e-15 , lambd = None , epsilon = 1e-10 , use_mean = False ) # It extends and enriches the POD approach by determining a quadratic basis for its residual Parameters: Name Type Description Default n_inputs int number of inputs used in the POD approximation (Default value = None) None alpha_0 float regularization parameter used in SparSA algorithm (Default value = None) None sparsity_tol float sparsity tolerance used in SpaRSA (Default value = 1e-15) 1e-15 lambd float regularization parameter used in SparSA algorithm (Default value = None) None epsilon float threshold for zeroing columns in SpaRSA (Default value = 1e-10) 1e-10 use_mean bool use mean for the SpaRSA loss function of not ? (Default value = False) False Returns: Type Description None nothing Source code in simulai/rom/_rom.py 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , ) eval ( data = None ) # It projects and reconstructs Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description None np.ndarray: the approximated data Source code in simulai/rom/_rom.py 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . 
V_bar fit ( input_data = None , target_data = None , pinv = False ) # It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Parameters: Name Type Description Default input_data ndarray in general, the original latent None series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" ) project ( data = None ) # Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description ndarray np.ndarray: the projection over the selected basis Source code in simulai/rom/_rom.py 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . valid_indices ] save ( save_path = None , model_name = None ) # Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . 
dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"Simulai rom"},{"location":"simulai_rom/simulai_rom/#simulairom","text":"","title":"simulai.rom"},{"location":"simulai_rom/simulai_rom/#pod","text":"Bases: ROM It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that Source code in simulai/rom/_rom.py 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 class POD ( ROM ): \"\"\"It executes the classical Proper Orthogonal Decomposition using the SciKit-learn interface. The PCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data in order to ensure that \"\"\" name = \"pod\" def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) def project ( self , data : np . ndarray = None ) -> np . 
ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) )","title":"POD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.__init__","text":"Propor Orthogonal Decomposition Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 def __init__ ( self , config : dict = None , svd_filter : callable = None ) -> None : \"\"\"Propor Orthogonal Decomposition Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . 
pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = PCA ( ** config ) self . modes = None self . data_mean = None self . svd_filter = svd_filter","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.fit","text":"Parameters: Name Type Description Default data ndarray (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 def fit ( self , data : np . ndarray = None ) -> None : \"\"\" Args: data (np.ndarray, optional): (Default value = None) Returns: : nothing \"\"\" if self . mean_component : self . data_mean = data . mean ( 0 ) data_til = data - self . data_mean mean_contrib = np . linalg . norm ( self . data_mean , 2 ) / np . linalg . norm ( data , 2 ) print ( \"Relative contribution of the mean component: {} \" . format ( mean_contrib ) ) else : data_til = data decomp = self . pca . fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy ))","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.project","text":"Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data_til ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T )","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.reconstruct","text":"Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 def reconstruct ( self , projected_data : np . 
ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] n_modes = self . singular_values . shape [ 0 ] if n_modes_used < n_modes : print ( f \"Truncating the number of modes from { n_modes } to { n_modes_used } \" ) if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )])","title":"reconstruct()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.restore","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 323 324 325 326 327 328 329 330 331 332 333 334 335 336 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) )","title":"restore()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.POD.save","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 309 310 311 312 313 314 315 316 317 318 319 320 321 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean )","title":"save()"},{"location":"simulai_rom/simulai_rom/#ipod","text":"Bases: ROM Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. 
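Both POD and IPOD expect the snapshots already flattened to a two-dimensional array of shape (n_samples, n_features); IPOD differs in that fit can be called once per batch, delegating to scikit-learn's IncrementalPCA.partial_fit. A minimal usage sketch, assuming both classes are importable as `from simulai.rom import POD, IPOD` (the import path, shapes, and batch count are illustrative, not taken from this page):

```python
import numpy as np

from simulai.rom import POD, IPOD  # assumed import path

# Snapshot matrix already flattened to two dimensions: (n_samples, n_features)
snapshots = np.random.rand(200, 1000)

# In-memory POD: "mean_component" and "eig_norm" are popped from the config dict,
# everything else is forwarded to sklearn's PCA
pod = POD(config={"n_components": 10, "mean_component": True})
pod.fit(data=snapshots)
latent = pod.project(data=snapshots)                # (200, 10)
restored = pod.reconstruct(projected_data=latent)   # (200, 1000)

# Batch-wise IPOD: fit is called once per batch and the modes are refined
# incrementally through IncrementalPCA.partial_fit
ipod = IPOD(config={"n_components": 10})
for batch in np.array_split(snapshots, 4, axis=0):
    ipod.fit(data=batch)
latent_i = ipod.project(data=snapshots)                  # (200, 10)
restored_i = ipod.reconstruct(projected_data=latent_i)   # (200, 1000)
```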
Source code in simulai/rom/_rom.py 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 394 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 class IPOD ( ROM ): \"\"\"Incremental Propor Orthogonal Decomposition It executes the Incremental Proper Orthogonal Decomposition using the SciKit-learn interface The IncrementalPCA class from SciKit-learn expects a two-dimensional array as input, so it is necessary to reshape the input data before processing it. This class is intended to be used for Big Data purposes. \"\"\" name = \"ipod\" def __init__ ( self , config : dict = None , data_mean : np . ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . 
sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T ) def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean ) def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . join ( save_path , model_name + \".npz\" ) )","title":"IPOD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.__init__","text":"Parameters: Name Type Description Default config dict configuration dictionary for the POD parameters (Default value = None) None data_mean ndarray pre-evaluated mean of the dataset (Default value = None) None svd_filter callable a filter callable applied to SVD decomposition (Default value = None) None Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 378 379 380 381 382 383 384 385 386 387 388 389 390 391 392 393 def __init__ ( self , config : dict = None , data_mean : np . 
ndarray = None , svd_filter : callable = None , ) -> None : \"\"\" Args: config (dict, optional): configuration dictionary for the POD parameters (Default value = None) data_mean (np.ndarray, optional): pre-evaluated mean of the dataset (Default value = None) svd_filter (callable, optional): a filter callable applied to SVD decomposition (Default value = None) Returns: None: nothing \"\"\" super () . __init__ () self . kind = \"batchwise\" if \"n_components\" not in config : config [ \"n_components\" ] = None if \"mean_component\" in config : self . mean_component = config . pop ( \"mean_component\" ) else : self . mean_component = True if \"eig_norm\" in config : self . eig_norm = config . pop ( \"eig_norm\" ) else : self . eig_norm = False self . pca = IncrementalPCA ( ** config ) self . modes = None self . data_mean = data_mean self . data_size = None self . svd_filter = svd_filter","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.fit","text":"Output shape: (space_dimension, n_modes) Parameters: Name Type Description Default data ndarray (Default value = None) None Source code in simulai/rom/_rom.py 395 396 397 398 399 400 401 402 403 404 405 406 407 408 409 410 411 412 413 414 415 416 417 418 419 420 421 422 423 424 425 426 427 428 429 430 431 432 433 434 435 436 437 438 439 440 def fit ( self , data : np . ndarray = None ) -> None : \"\"\"Output shape: (space_dimension, n_modes) Args: data (np.ndarray, optional): (Default value = None) \"\"\" if self . data_mean is None : if not isinstance ( self . data_mean , np . ndarray ) and not self . data_size : self . data_mean = data . mean ( 0 ) self . data_size = data . shape [ 0 ] else : self . data_mean = ( self . data_size * self . data_mean + data . shape [ 0 ] * data . mean ( 0 ) ) / ( self . data_size + data . shape [ 0 ]) self . data_size += data . shape [ 0 ] else : assert ( len ( self . data_mean . shape ) == 1 ), f \"The data_mean array must have dimension 1, but received shape { self . data_mean . shape } \" if self . mean_component : data_til = data - self . data_mean else : data_til = data decomp = self . pca . partial_fit ( data_til ) self . modes = decomp . components_ self . singular_values = decomp . singular_values_ # Executing SVD filtering over the singular values if necessary if self . svd_filter is not None : self . singular_values_truncated = self . svd_filter . exec ( singular_values = self . singular_values , data_shape = data . shape ) n_values = len ( self . singular_values_truncated ) self . singular_values = self . singular_values_truncated self . modes = self . modes [: n_values , :] else : pass relative_modal_energy = decomp . explained_variance_ratio_ . sum () print ( \"Relative Modal Energy {} \" . format ( relative_modal_energy )) self . relative_modal_energy = relative_modal_energy","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.project","text":"Parameters: Name Type Description Default data ndarray array of shape (n_samples, n_features) (Default value = None) None Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray np.ndarray: array of shape (n_samples, n_modes) containing ndarray the projection over the POD basis Source code in simulai/rom/_rom.py 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 def project ( self , data : np . ndarray = None ) -> np . 
ndarray : \"\"\" Args: data (np.ndarray, optional): array of shape (n_samples, n_features) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_modes) containing np.ndarray: array of shape (n_samples, n_modes) containing the projection over the POD basis \"\"\" if self . mean_component : data_til = data - self . data_mean else : data_til = data if not type ( self . modes ) == np . ndarray : self . fit ( data ) if self . eig_norm : return np . sqrt ( self . singular_values )[ None , :] * ( data_til . dot ( self . modes . T )) else : return data_til . dot ( self . modes . T )","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.reconstruct","text":"Parameters: Name Type Description Default projected_data ndarray array of shape (n_samples, None n_modes) (Default value = None) Returns: Type Description ndarray np.ndarray: array of shape (n_samples, n_features) Source code in simulai/rom/_rom.py 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 def reconstruct ( self , projected_data : np . ndarray = None ) -> np . ndarray : \"\"\" Args: projected_data (np.ndarray, optional): array of shape (n_samples, n_modes) (Default value = None) Returns: np.ndarray: array of shape (n_samples, n_features) \"\"\" n_modes_used = projected_data . shape [ - 1 ] if getattr ( self , \"eig_norm\" , False ) != False : singular_values = self . singular_values [ slice ( 0 , n_modes_used )] projected_data = ( 1 / np . sqrt ( singular_values )[ None , :]) * projected_data else : pass \"\"\" It is possible to reconstruct using less modes than created during the ROM construction, so we will adjust the size of self.modes according to projected_data \"\"\" if self . mean_component : # We are using the approach of evaluating the mean value incrementally # If this is the best way for doing it, just the experiments will demonstrate return ( projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )]) + self . data_mean ) else : return projected_data . dot ( self . modes [ slice ( 0 , n_modes_used )])","title":"reconstruct()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.restore","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 516 517 518 519 520 521 522 523 524 525 526 527 528 529 def restore ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" self . modes , self . data_mean = np . load ( os . path . 
join ( save_path , model_name + \".npz\" ) )","title":"restore()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.IPOD.save","text":"It saves data in a NPZ file Parameters: Name Type Description Default save_path str path to save the model (Default value = None) None model_name str name for the saved model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 501 502 503 504 505 506 507 508 509 510 511 512 513 514 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"It saves data in a NPZ file Args: save_path (str, optional): path to save the model (Default value = None) model_name (str, optional): name for the saved model (Default value = None) Returns: : nothing \"\"\" np . savez ( os . path . join ( save_path , model_name + \".npz\" ), self . modes , self . data_mean )","title":"save()"},{"location":"simulai_rom/simulai_rom/#gpod","text":"Bases: ROM Source code in simulai/rom/_rom.py 839 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 892 893 894 895 896 897 898 899 900 901 902 903 904 905 906 907 908 909 910 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 class GPOD ( ROM ): def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . mask_array = None # It gets the positions related to the n maximum and n minimum values to be used # to locate sensors def _extrema ( self ): locations = list () n_modes = self . modes . shape [ 0 ] for mode_i in range ( n_modes ): n_sensors = self . sensors_distribution [ mode_i ] n_minimum = n_maximum = int ( n_sensors / 2 ) locations += self . modes [ mode_i ] . argsort ()[: n_minimum ] . tolist () locations += self . modes [ mode_i ] . argsort ()[ - n_maximum :] . tolist () return locations # The m dot product (a, b)_m = (m*a, m*b), in which m is a mask array def m_dot ( self , a , b , mask_array = None ): return ( mask_array * a ) . dot (( mask_array * b ) . 
T ) def fit ( self , data = None ): self . pca . fit ( data = data ) self . modes = self . pca . modes n_features = self . modes . shape [ 1 ] sensors_locations = self . placer () mask_array = np . zeros (( 1 , n_features )) mask_array [:, sensors_locations ] = 1 self . mask_array = mask_array self . M = self . m_dot ( self . modes , self . modes , mask_array = mask_array ) self . M_inv = np . linalg . inv ( self . M ) print ( f \"The condition number for the matrix M is { np . linalg . cond ( self . M ) } \" ) def project ( self , data = None ): data_til = self . mask_array * data f = self . m_dot ( data_til , self . modes , mask_array = self . mask_array ) return f @ self . M_inv . T def reconstruct ( self , projected_data = None ): return self . pca . reconstruct ( projected_data = projected_data )","title":"GPOD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.GPOD.__init__","text":"GPOD Parameters: Name Type Description Default pca_type str the kind of PCA to be used (Default value = \"pod\") 'pod' pca_config (Default value = None) None config (Default value = None) None Source code in simulai/rom/_rom.py 840 841 842 843 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 886 887 888 889 890 891 def __init__ ( self , pca_type = \"pod\" , pca_config = None , config = None ): \"\"\"GPOD Args: pca_type (str, optional): the kind of PCA to be used (Default value = \"pod\") pca_config: (Default value = None) config: (Default value = None) \"\"\" super () . __init__ () this_module = importlib . import_module ( \"simulai.rom\" ) # A PCA instance is used for constructing the basis self . pca_type = pca_type self . config = config self . sensors_distribution = None self . n_sensors = None self . sensors_placer = None for key , value in config . items (): setattr ( self , key , value ) assert self . sensors_distribution , \"sensors_distribution must be provided\" if not self . sensors_placer or self . sensors_placer != \"extrema\" : print ( \"As no placement criteria eas provided for the sensor, the extrema method will be used.\" ) self . sensors_placer = \"extrema\" else : raise Exception ( f \"The placement method { self . sensors_placer } is not supported.\" ) if self . sensors_placer == \"extrema\" : assert all ( [ not item % 2 for item in self . sensors_distribution ] ), \"If extrema placement is being used, all the number of sensors must be pair\" self . placer = getattr ( self , \"_\" + self . sensors_placer ) self . n_sensors = sum ( self . sensors_distribution ) self . pca_class = getattr ( this_module , self . pca_type . upper ()) self . pca = self . pca_class ( config = pca_config ) self . modes = None self . M = None self . M_inv = None self . mask_array = None","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#hosvd","text":"Bases: ROM High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. 
Source code in simulai/rom/_rom.py 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 637 638 639 640 641 642 643 644 645 646 647 648 649 650 651 652 653 654 655 656 657 658 659 660 661 662 663 664 665 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 702 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 722 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 761 762 763 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 class HOSVD ( ROM ): \"\"\"High-Order Singular Value Decomposition It executes the High-Order SVD using a multidimensional array as input. This class is intended to be used for Big Data purposes. \"\"\" name = \"hosvd\" def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" ) def _configure_SVD ( self ) -> Union [ List [ TruncatedSVD ], List [ ParallelSVD ]]: if self . engine == \"sklearn\" : return [ TruncatedSVD ( n_components = n ) for n in self . n_components ] elif self . engine == \"dask\" : return [ ParallelSVD ( n_components = n ) for n in self . n_components ] else : raise Exception ( f \"The engine { self . engine } is not supported, it must be in ['sklearn', 'dask'].\" ) def _set_components ( self ) -> None : for j , name in enumerate ( self . components_names ): setattr ( self , name . upper () + self . _comp_tag , self . U_list [ j ]) def _k_svd ( self , data : np . ndarray = None , k : int = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"SVD applied to the k-mode flattening Args: data (np.ndarray, optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: Left eigenvectors matrix U \"\"\" self . svd_classes [ k ] . fit ( data ) if self . 
engine == \"sklearn\" : s = self . svd_classes [ k ] . singular_values_ * np . eye ( self . n_components [ k ]) VT = self . svd_classes [ k ] . components_ SVT = s @ VT U = ( np . linalg . pinv ( SVT . T ) @ data . T ) . T else : U = getattr ( self . svd_classes [ k ], \"U\" ) return U def _k_flattening ( self , data : Union [ np . ndarray , da . core . Array ] = None , k : int = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"k-mode flattening Args: data (Union[np.ndarray, da.core.Array], optional): (Default value = None) k (int, optional): (Default value = None) Returns: np.ndarray: reshaped array of shape (n_1, n_2*n_3*...*n_n) \"\"\" sizelist = copy . deepcopy ( self . sizelist ) sizelist_collapsible = copy . deepcopy ( sizelist ) sizelist [ 0 ] = k sizelist [ k ] = 0 sizelist_collapsible . pop ( k ) collapsible_dims = np . prod ([ self . shape [ s ] for s in sizelist_collapsible ]) if isinstance ( data , da . core . Array ): return data . transpose ( sizelist ) . reshape ( ( - 1 , collapsible_dims ), limit = self . limit ) else : return data . transpose ( sizelist ) . reshape ( - 1 , collapsible_dims ) def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . _set_components () def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . 
tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . transpose () # Saving to disk the complete model def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"HOSVD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.__init__","text":"Parameters: Name Type Description Default n_components List [ int ] list with the number of components for each direction (Default value = None) None components_names List [ str ] (Default value = None) None engine str (Default value = \"sklearn\") 'sklearn' limit str (Default value = \"1 GiB\") '1 GiB' Returns: Name Type Description None None nothing Source code in simulai/rom/_rom.py 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 def __init__ ( self , n_components : List [ int ] = None , components_names : List [ str ] = None , engine : str = \"sklearn\" , limit : str = \"1 GiB\" , ) -> None : \"\"\" Args: n_components (List[int], optional): list with the number of components for each direction (Default value = None) components_names (List[str], optional): (Default value = None) engine (str, optional): (Default value = \"sklearn\") limit (str, optional): (Default value = \"1 GiB\") Returns: None: nothing \"\"\" super () . __init__ () self . n_components = n_components # Naming the components of the HOSVD decomposition if components_names is None : self . components_names = [ f \"component_ { i } \" for i in range ( len ( self . n_components )) ] else : assert len ( components_names ) == len ( n_components ), ( \"The number of components must be equal\" \" to the number of names.\" ) self . components_names = components_names self . engine = engine self . limit = limit self . svd_classes = self . _configure_SVD () self . sizelist = None self . shape = None self . n_dims = None self . _comp_tag = \"_decomp\" self . U_list = list () self . S = None self . k_svd = self . _k_svd if self . engine == \"sklearn\" : self . lin = np elif self . engine == \"dask\" : self . lin = da else : raise Exception ( f \"The engine { self . engine } is not supported.\" )","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.fit","text":"Executing High-Order SVD Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 666 667 668 669 670 671 672 673 674 675 676 677 678 679 680 681 682 683 684 685 686 687 688 689 690 691 692 693 694 695 696 697 698 699 700 701 def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> None : \"\"\"Executing High-Order SVD Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: : nothing \"\"\" import pprint pprinter = pprint . PrettyPrinter ( indent = 2 ) self . n_dims = len ( data . 
shape ) self . shape = data . shape S = data self . sizelist = np . arange ( self . n_dims ) . tolist () print ( \"Using the SVD classes: \\n \" ) pprinter . pprint ( self . svd_classes ) print ( \" \\n \" ) for k in range ( self . n_dims ): print ( f \"Executing SVD for the dimension { k } \" ) data_k_flatten = self . _k_flattening ( data = data , k = k ) U = self . k_svd ( data = data_k_flatten , k = k ) self . U_list . append ( U ) S = self . lin . tensordot ( S , U , axes = ([ 0 ], [ 0 ])) self . S = np . array ( S ) self . _set_components ()","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.project","text":"Projecting using the SVD basis Parameters: Name Type Description Default data Union [ ndarray , Array ] input array of shape (n_1, n_2, ..., n_n) (Default value = None) None Returns: Type Description Union [ ndarray , Array ] np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') Source code in simulai/rom/_rom.py 703 704 705 706 707 708 709 710 711 712 713 714 715 716 717 718 719 720 721 def project ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Projecting using the SVD basis Args: data (Union[np.ndarray, da.core.Array], optional): input array of shape (n_1, n_2, ..., n_n) (Default value = None) Returns: np.ndarray: reduced array of shape (n_1', n_2', ..., n_n') \"\"\" assert len ( data . shape ) == self . n_dims S = data for k in range ( self . n_dims ): S = np . tensordot ( S , self . U_list [ k ], axes = ([ 0 ], [ 0 ])) return S","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.reconstruct","text":"Reconstruction using the pre-existent basis Parameters: Name Type Description Default data Union [ ndarray , Array ] reduced array of shape (n_1', n_2', ..., None n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: Type Description Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., Union [ ndarray , Array ] n_n) Source code in simulai/rom/_rom.py 723 724 725 726 727 728 729 730 731 732 733 734 735 736 737 738 739 740 741 742 743 744 745 746 747 748 749 750 751 752 753 754 755 756 757 758 759 760 def reconstruct ( self , data : Union [ np . ndarray , da . core . Array ] = None , replace_components : dict = None , ) -> Union [ np . ndarray , da . core . Array ]: \"\"\"Reconstruction using the pre-existent basis Args: data (Union[np.ndarray, da.core.Array], optional): reduced array of shape (n_1', n_2', ..., n_n') (Default value = None) replace_components (dict, optional): (Default value = None) Returns: np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., np.ndarray: reconstructed array of shape (n_1, n_2, n_3,..., n_n) \"\"\" if replace_components is not None : U_list = copy . deepcopy ( self . U_list ) for key , value in replace_components . items (): try : index = self . components_names . index ( key ) except : raise Exception ( f \"The key { key } is not in the list of components.\" ) U_list [ index ] = value else : U_list = self . U_list A = data modes = np . arange ( self . n_dims ) . tolist () for k in modes : A = np . tensordot ( U_list [ k ], A , axes = ([ 1 ], [ k ])) return A . 
transpose ()","title":"reconstruct()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.HOSVD.save","text":"Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 764 765 766 767 768 769 770 771 772 773 774 775 776 777 778 779 780 781 782 783 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"lin\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"save()"},{"location":"simulai_rom/simulai_rom/#parallelsvd","text":"Bases: ROM Source code in simulai/rom/_rom.py 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 class ParallelSVD ( ROM ): name = \"parallel_svd\" def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . V = None def _chunk_size_condition ( self , size : int , chunk_size : int ) -> int : if size // chunk_size == 0 : return size else : return size // chunk_size def fit ( self , data : Union [ np . ndarray , da . core . Array ] = None ) -> Union [ np . ndarray , da . core . Array ]: if self . chunks == None : chunks = [ self . _chunk_size_condition ( size , self . default_chunks_numbers [ j ]) for j , size in enumerate ( data . shape ) ] else : chunks = self . chunks if isinstance ( data , np . ndarray ): parallel_data = da . from_array ( data , chunks = chunks ) else : parallel_data = data U , s , V = da . linalg . svd_compressed ( parallel_data , k = self . n_components ) self . U = U self . s = s self . V = V","title":"ParallelSVD"},{"location":"simulai_rom/simulai_rom/#simulai.rom.ParallelSVD.__init__","text":"Executing SVD using dask Parameters: Name Type Description Default n_components int (Default value = None) None chunks Tuple [ int ] (Default value = None) None Source code in simulai/rom/_rom.py 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 def __init__ ( self , n_components : int = None , chunks : Tuple [ int ] = None ) -> None : \"\"\"Executing SVD using dask Args: n_components (int, optional): (Default value = None) chunks (Tuple[int], optional): (Default value = None) \"\"\" super () . __init__ () self . n_components = n_components self . chunks = chunks self . default_chunks_numbers = ( 10 , 10 ) self . U = None self . s = None self . 
V = None","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#qqm","text":"Source code in simulai/rom/_rom.py 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 class QQM : def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , ) def _kronecker_product ( self , a : np . ndarray = None , b : np . ndarray = None ) -> np . ndarray : \"\"\"It executes a Kronecker dot between two arrays Args: a (np.ndarray, optional): left array (Default value = None) b (np.ndarray, optional): right (transposed) array (Default value = None) Returns: np.ndarray: the Kronecker output array \"\"\" assert ( a . shape == b . shape ), f \"a and b must have the same shape, but received { a . shape } and { b . shape } \" kron_output = np . einsum ( \"bi, bj->bij\" , a , b ) assert ( np . isnan ( kron_output ) . max () == False ), \"There are NaN in the Kronecker output\" # Checking if the Kronecker output tensor is symmetric or not if np . array_equal ( kron_output , kron_output . transpose ( 0 , 2 , 1 )): return kron_output [:, self . i_u , self . j_u ] else : shapes = kron_output . shape [ 1 :] return kron_output . reshape ( - 1 , np . prod ( shapes )) # Each batch in W has n_inputs*(n_inputs + 1)/2 columns def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . 
_kronecker_product ( a = data , b = data ) def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" ) def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . valid_indices ] def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . V_bar def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"QQM"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.W_transform","text":"W_transform simply applied Kronecker product for data itself Parameters: Name Type Description Default data ndarray the data to be W-transformed (Default value = None) None Returns: Type Description ndarray np.ndarray: the Kronecker product between data and data ndarray np.ndarray: the Kronecker product between data and data ndarray itself Source code in simulai/rom/_rom.py 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 def W_transform ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"W_transform simply applied Kronecker product for data itself Args: data (np.ndarray, optional): the data to be W-transformed (Default value = None) Returns: np.ndarray: the Kronecker product between data and data np.ndarray: the Kronecker product between data and data itself \"\"\" return self . 
_kronecker_product ( a = data , b = data )","title":"W_transform()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.__init__","text":"It extends and enriches the POD approach by determining a quadratic basis for its residual Parameters: Name Type Description Default n_inputs int number of inputs used in the POD approximation (Default value = None) None alpha_0 float regularization parameter used in SparSA algorithm (Default value = None) None sparsity_tol float sparsity tolerance used in SpaRSA (Default value = 1e-15) 1e-15 lambd float regularization parameter used in SparSA algorithm (Default value = None) None epsilon float threshold for zeroing columns in SpaRSA (Default value = 1e-10) 1e-10 use_mean bool use mean for the SpaRSA loss function of not ? (Default value = False) False Returns: Type Description None nothing Source code in simulai/rom/_rom.py 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 def __init__ ( self , n_inputs : int = None , alpha_0 : float = None , sparsity_tol : float = 1e-15 , lambd : float = None , epsilon : float = 1e-10 , use_mean : bool = False , ) -> None : \"\"\"It extends and enriches the POD approach by determining a quadratic basis for its residual Args: n_inputs (int, optional): number of inputs used in the POD approximation (Default value = None) alpha_0 (float, optional): regularization parameter used in SparSA algorithm (Default value = None) sparsity_tol (float, optional): sparsity tolerance used in SpaRSA (Default value = 1e-15) lambd (float, optional): regularization parameter used in SparSA algorithm (Default value = None) epsilon (float, optional): threshold for zeroing columns in SpaRSA (Default value = 1e-10) use_mean (bool, optional): use mean for the SpaRSA loss function of not ? (Default value = False) Returns: : nothing \"\"\" self . alpha_0 = alpha_0 self . lambd = lambd self . epsilon = epsilon self . n_inputs = n_inputs self . i_u , self . j_u = np . triu_indices ( self . n_inputs ) self . V_bar = None self . valid_indices = None self . optimizer = SpaRSA ( lambd = self . lambd , alpha_0 = alpha_0 , use_mean = use_mean , sparsity_tol = sparsity_tol , epsilon = epsilon , transform = self . W_transform , )","title":"__init__()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.eval","text":"It projects and reconstructs Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description None np.ndarray: the approximated data Source code in simulai/rom/_rom.py 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 def eval ( self , data : np . ndarray = None ) -> None : \"\"\"It projects and reconstructs Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the approximated data \"\"\" return self . W_transform ( data = data ) @ self . 
V_bar","title":"eval()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.fit","text":"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Parameters: Name Type Description Default input_data ndarray in general, the original latent None series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 def fit ( self , input_data : np . ndarray = None , target_data : np . ndarray = None , pinv : bool = False , ) -> None : \"\"\"It executes the fitting process using the chosen optimization algorithm, SpaRSA or Moore-Penrose pseudoinverse Args: input_data (np.ndarray, optional): in general, the original latent series (Default value = None) target_data (np.ndarray, optional): in general, the residual of the linear approximation (Default value = None) pinv (bool, optional): use pseudoinverse or not (Default value = False) Returns: : nothing \"\"\" if not pinv : self . V_bar = self . optimizer . fit ( input_data = input_data , target_data = target_data ) else : V_bar = np . linalg . pinv ( self . W_transform ( data = input_data )) @ target_data self . V_bar = np . where ( np . abs ( V_bar ) < self . optimizer . sparsity_tol , 0 , V_bar ) self . valid_indices = np . argwhere ( np . sum ( np . abs ( self . V_bar ), axis = 1 ) > 0 ) . flatten () print ( f \" \\n Number of original modes: { self . i_u . size } . Number of modes selected: { self . valid_indices . size } \" )","title":"fit()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.project","text":"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Parameters: Name Type Description Default data ndarray the data to be projected (Default value = None) None Returns: Type Description ndarray np.ndarray: the projection over the selected basis Source code in simulai/rom/_rom.py 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 def project ( self , data : np . ndarray = None ) -> np . ndarray : \"\"\"Executes the W-transformation and collects just the valid modes determined by the optimization algorithm Args: data (np.ndarray, optional): the data to be projected (Default value = None) Returns: np.ndarray: the projection over the selected basis \"\"\" return self . W_transform ( data = data )[:, self . 
valid_indices ]","title":"project()"},{"location":"simulai_rom/simulai_rom/#simulai.rom.QQM.save","text":"Complete saving Parameters: Name Type Description Default save_path str path to the saving directory (Default value = None) None model_name str name for the model (Default value = None) None Returns: Type Description None nothing Source code in simulai/rom/_rom.py 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 def save ( self , save_path : str = None , model_name : str = None ) -> None : \"\"\"Complete saving Args: save_path (str, optional): path to the saving directory (Default value = None) model_name (str, optional): name for the model (Default value = None) Returns: : nothing \"\"\" blacklist = [ \"optimizer\" ] for el in blacklist : setattr ( self , el , None ) path = os . path . join ( save_path , model_name + \".pkl\" ) try : with open ( path , \"wb\" ) as fp : pickle . dump ( self , fp , protocol = 4 ) except Exception as e : print ( e , e . args )","title":"save()"}]} \ No newline at end of file diff --git a/simulai_activations/index.html b/simulai_activations/index.html index 047f758..9927e52 100644 --- a/simulai_activations/index.html +++ b/simulai_activations/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_batching/index.html b/simulai_batching/index.html index 8871f35..6305fd2 100644 --- a/simulai_batching/index.html +++ b/simulai_batching/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_file/index.html b/simulai_file/index.html index 95e7d3a..054fc57 100644 --- a/simulai_file/index.html +++ b/simulai_file/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_io/index.html b/simulai_io/index.html index 1e74d6e..9ff3210 100644 --- a/simulai_io/index.html +++ b/simulai_io/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_models/simulai_models_autoencoder/index.html b/simulai_models/simulai_models_autoencoder/index.html index cbdc35a..55ee5ad 100644 --- a/simulai_models/simulai_models_autoencoder/index.html +++ b/simulai_models/simulai_models_autoencoder/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_models/simulai_models_deeponet/index.html b/simulai_models/simulai_models_deeponet/index.html index 17b867d..270a8a4 100644 --- a/simulai_models/simulai_models_deeponet/index.html +++ b/simulai_models/simulai_models_deeponet/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_models/simulai_models_transformer/index.html b/simulai_models/simulai_models_transformer/index.html index 21e4b44..c9bceb3 100644 --- a/simulai_models/simulai_models_transformer/index.html +++ b/simulai_models/simulai_models_transformer/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_models/simulai_models_unet/index.html b/simulai_models/simulai_models_unet/index.html index 167ef22..44ca00d 100644 --- a/simulai_models/simulai_models_unet/index.html +++ b/simulai_models/simulai_models_unet/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_optimization/simulai_losses/index.html b/simulai_optimization/simulai_losses/index.html new file mode 100644 index 0000000..7dec258 --- /dev/null +++ b/simulai_optimization/simulai_losses/index.html @@ -0,0 +1,3247 @@ + + + + Loss Functions - SimulAI's Documentation + + + + + + + + + + + + + + + + + + + + + + + + + + + +

Loss Functions

+ +
+

Loss Functions#

+

RMSELoss#

+ + +
+ + + + +
+

+ Bases: LossBasics

+ + +
+ Source code in simulai/optimization/_losses.py +
+ (lines 163-278)
class RMSELoss(LossBasics):
+    def __init__(self, operator: torch.nn.Module = None) -> None:
+        """Vanilla mean-squared error loss function
+
+        Args:
+            operator (torch.nn.Module): the operator used for evaluating
+                the loss function (usually a neural network)
+        """
+        super().__init__()
+
+        self.operator = operator
+        self.loss_states = {"loss": list()}
+
+    def _data_loss(
+        self,
+        output_tilde: torch.Tensor = None,
+        norm_value: torch.Tensor = None,
+        target_data_tensor: torch.Tensor = None,
+    ) -> torch.Tensor:
+        """It executes the evaluation of the data-driven mean-squared error
+
+        Args:
+            output_tilde (torch.Tensor): the output generated by
+                self.operator
+            norm_value (torch.Tensor): the value used for normalizing
+                the loss evaluation
+            target_data_tensor (torch.Tensor): the target tensor to be
+                compared with output_tilde
+
+        Returns:
+            torch.Tensor: the loss function value for a given state
+        """
+
+        if norm_value is not None:
+            data_loss = torch.mean(
+                torch.square((output_tilde - target_data_tensor) / norm_value)
+            )
+        else:
+            data_loss = torch.mean(torch.square((output_tilde - target_data_tensor)))
+
+        return data_loss
+
+    def __call__(
+        self,
+        input_data: Union[dict, torch.Tensor] = None,
+        target_data: torch.Tensor = None,
+        call_back: str = "",
+        norm_value: list = None,
+        lambda_1: float = 0.0,
+        device: str = "cpu",
+        lambda_2: float = 0.0,
+    ) -> Callable:
+        """Main function for generating complete loss function workflow
+
+        Args:
+            input_data (Union[dict, torch.Tensor]): the data used as
+                input for self.operator
+            target_data (torch.Tensor): the target data used for
+                training self.operator
+            call_back (str): a string used for composing the logging of
+                the optimization process
+            norm_value (list): a list of values used for normalizing the
+                loss terms
+            lambda_1 (float): the penalty for the L^1  regularization
+                term
+            lambda_2 (float): the penalty for the L^2  regularization
+                term
+            device (str): the device in which the loss evaluation will
+                be executed, 'cpu' or 'gpu'
+
+        Returns:
+            Callable: the closure function used for evaluating the loss
+            value
+        """
+
+        l1_reg_multiplication = self._exec_multiplication_in_regularization(
+            lambda_type=type(lambda_1), term_type=type(self.operator.weights_l1)
+        )
+
+        l2_reg_multiplication = self._exec_multiplication_in_regularization(
+            lambda_type=type(lambda_2), term_type=type(self.operator.weights_l2)
+        )
+
+        def closure():
+            output_tilde = self.operator.forward(**input_data)
+
+            data_loss = self._data_loss(
+                output_tilde=output_tilde,
+                norm_value=norm_value,
+                target_data_tensor=target_data,
+            )
+
+            # L² and L¹ regularization term
+            weights_l2 = self.operator.weights_l2
+            weights_l1 = self.operator.weights_l1
+
+            # beta *||W||_2 + alpha * ||W||_1
+            l2_reg = l2_reg_multiplication(lambda_2, weights_l2)
+            l1_reg = l1_reg_multiplication(lambda_1, weights_l1)
+
+            # Loss = ||Ũ_t - U_t||_2  +
+            #         lambda_2 * ||W||_2 + lambda_1 * ||W||_1
+
+            loss = data_loss + l2_reg + l1_reg
+
+            # Back-propagation
+            loss.backward()
+
+            self.loss_states["loss"].append(float(loss.detach().data))
+
+            sys.stdout.write(("\rloss: {} {}").format(loss, call_back))
+            sys.stdout.flush()
+
+            return loss
+
+        return closure
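A minimal usage sketch (not part of the generated page) of how the closure produced by RMSELoss can drive a standard torch optimizer. The Net module, the tensors and the hyperparameter values below are hypothetical, and the import path is assumed from the module shown above; RMSELoss only requires that the operator exposes forward, weights_l1 and weights_l2, and that input_data is a dict of keyword arguments for forward.

import torch

from simulai.optimization import RMSELoss  # import path assumed from the module above


class Net(torch.nn.Module):
    # Hypothetical operator: any torch module exposing forward, weights_l1 and weights_l2.
    def __init__(self):
        super().__init__()
        self.linear = torch.nn.Linear(10, 1)

    @property
    def weights_l1(self):
        # sum of absolute values of all trainable weights (illustrative choice)
        return sum(p.abs().sum() for p in self.parameters())

    @property
    def weights_l2(self):
        # sum of squared values of all trainable weights (illustrative choice)
        return sum(p.pow(2).sum() for p in self.parameters())

    def forward(self, input_data=None):
        return self.linear(input_data)


net = Net()
loss_fn = RMSELoss(operator=net)
optimizer = torch.optim.Adam(net.parameters(), lr=1e-3)

x = torch.randn(32, 10)
y = torch.randn(32, 1)

# __call__ returns a closure; how the lambda/weight products are formed is
# delegated to LossBasics._exec_multiplication_in_regularization (not shown here).
closure = loss_fn(
    input_data={"input_data": x},
    target_data=y,
    lambda_1=1e-5,
    lambda_2=1e-5,
)

for _ in range(100):
    optimizer.zero_grad()
    closure()  # forward pass, data loss + L1/L2 penalties, and backward()
    optimizer.step()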
+
+
+ + + +
+ + + + + + + + + + +
+ + + + +

+ __call__(input_data=None, target_data=None, call_back='', norm_value=None, lambda_1=0.0, device='cpu', lambda_2=0.0) + +#

+ + +
+ +

Main function for generating complete loss function workflow

+ + + +

Parameters:

Name | Type | Description | Default
input_data | Union[dict, Tensor] | the data used as input for self.operator | None
target_data | Tensor | the target data used for training self.operator | None
call_back | str | a string used for composing the logging of the optimization process | ''
norm_value | list | a list of values used for normalizing the loss terms | None
lambda_1 | float | the penalty for the L^1 regularization term | 0.0
lambda_2 | float | the penalty for the L^2 regularization term | 0.0
device | str | the device in which the loss evaluation will be executed, 'cpu' or 'gpu' | 'cpu'

Returns:

Name | Type | Description
Callable | Callable | the closure function used for evaluating the loss value
+ Source code in simulai/optimization/_losses.py +
+ (lines 205-278)
def __call__(
+    self,
+    input_data: Union[dict, torch.Tensor] = None,
+    target_data: torch.Tensor = None,
+    call_back: str = "",
+    norm_value: list = None,
+    lambda_1: float = 0.0,
+    device: str = "cpu",
+    lambda_2: float = 0.0,
+) -> Callable:
+    """Main function for generating complete loss function workflow
+
+    Args:
+        input_data (Union[dict, torch.Tensor]): the data used as
+            input for self.operator
+        target_data (torch.Tensor): the target data used for
+            training self.operator
+        call_back (str): a string used for composing the logging of
+            the optimization process
+        norm_value (list): a list of values used for normalizing the
+            loss terms
+        lambda_1 (float): the penalty for the L^1  regularization
+            term
+        lambda_2 (float): the penalty for the L^2  regularization
+            term
+        device (str): the device in which the loss evaluation will
+            be executed, 'cpu' or 'gpu'
+
+    Returns:
+        Callable: the closure function used for evaluating the loss
+        value
+    """
+
+    l1_reg_multiplication = self._exec_multiplication_in_regularization(
+        lambda_type=type(lambda_1), term_type=type(self.operator.weights_l1)
+    )
+
+    l2_reg_multiplication = self._exec_multiplication_in_regularization(
+        lambda_type=type(lambda_2), term_type=type(self.operator.weights_l2)
+    )
+
+    def closure():
+        output_tilde = self.operator.forward(**input_data)
+
+        data_loss = self._data_loss(
+            output_tilde=output_tilde,
+            norm_value=norm_value,
+            target_data_tensor=target_data,
+        )
+
+        # L² and L¹ regularization term
+        weights_l2 = self.operator.weights_l2
+        weights_l1 = self.operator.weights_l1
+
+        # beta *||W||_2 + alpha * ||W||_1
+        l2_reg = l2_reg_multiplication(lambda_2, weights_l2)
+        l1_reg = l1_reg_multiplication(lambda_1, weights_l1)
+
+        # Loss = ||Ũ_t - U_t||_2  +
+        #         lambda_2 * ||W||_2 + lambda_1 * ||W||_1
+
+        loss = data_loss + l2_reg + l1_reg
+
+        # Back-propagation
+        loss.backward()
+
+        self.loss_states["loss"].append(float(loss.detach().data))
+
+        sys.stdout.write(("\rloss: {} {}").format(loss, call_back))
+        sys.stdout.flush()
+
+        return loss
+
+    return closure
+
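The closure returned by __call__ performs the forward pass, assembles the regularized loss and calls backward(), which is the contract expected by torch optimizers that accept closures (e.g. LBFGS). The sketch below illustrates that pattern with plain PyTorch objects; the network, data and penalty values are illustrative stand-ins, not SimulAI components.

import torch

# Toy network and data standing in for self.operator and the training dataset
net = torch.nn.Linear(2, 1)
x = torch.randn(64, 2)
y = torch.randn(64, 1)

optimizer = torch.optim.LBFGS(net.parameters(), lr=0.1)

def closure():
    optimizer.zero_grad()
    output_tilde = net(x)  # plays the role of self.operator.forward(**input_data)
    data_loss = torch.mean(torch.square(output_tilde - y))
    l1_reg = 1e-5 * sum(p.abs().sum() for p in net.parameters())   # lambda_1 * ||W||_1
    l2_reg = 1e-5 * sum(p.pow(2).sum() for p in net.parameters())  # lambda_2 * ||W||_2
    loss = data_loss + l2_reg + l1_reg
    loss.backward()  # back-propagation, as in the closure above
    return loss

for _ in range(10):
    optimizer.step(closure)  # the optimizer re-evaluates the closure as needed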
+
+

__init__(operator=None) #


Vanilla mean-squared error loss function

+ + + +

Parameters:

Name        Type      Description                                                                      Default
operator    Module    the operator used for evaluating the loss function (usually a neural network)   None
Source code in simulai/optimization/_losses.py, lines 164-174:
def __init__(self, operator: torch.nn.Module = None) -> None:
+    """Vanilla mean-squared error loss function
+
+    Args:
+        operator (torch.nn.Module): the operator used for evaluating
+            the loss function (usually a neural network)
+    """
+    super().__init__()
+
+    self.operator = operator
+    self.loss_states = {"loss": list()}
+
+
+
+ +

WRMSELoss#

+ + +
+ + + + +
+

+ Bases: LossBasics

Source code in simulai/optimization/_losses.py, lines 282-463:
class WRMSELoss(LossBasics):
+    def __init__(self, operator=None):
+        """Weighted mean-squared error loss function
+
+        Args:
+            operator (torch.nn.Module): the operator used for evaluating
+                the loss function (usually a neural network)
+        """
+
+        super().__init__()
+
+        self.operator = operator
+        self.split_dim = 1
+        self.tol = 1e-25
+
+        self.loss_evaluator = None
+        self.norm_evaluator = None
+
+        self.axis_loss_evaluator = lambda res: torch.mean(torch.square((res)), dim=1)
+
+        self.loss_states = {"loss": list()}
+
+    def _data_loss(
+        self,
+        output_tilde: torch.Tensor = None,
+        weights: list = None,
+        target_data_tensor: torch.Tensor = None,
+        axis: int = -1,
+    ) -> List:
+        """It executes the evaluation of the data-driven mean-squared error
+
+        Args:
+            output_tilde (torch.Tensor): the output generated by
+                self.operator
+            weights (list): weights for rescaling each variable
+                output by self.operator
+            target_data_tensor (torch.Tensor): the target tensor to be
+                compared with output_tilde
+
+        Returns:
+            torch.Tensor: the loss function value for a given state
+        """
+
+        output_split = torch.split(output_tilde, self.split_dim, dim=axis)
+        target_split = torch.split(target_data_tensor, self.split_dim, dim=axis)
+
+        data_losses = [
+            weights[i]
+            * self.loss_evaluator(out_split - tgt_split)
+            / self.norm_evaluator(tgt_split)
+            for i, (out_split, tgt_split) in enumerate(zip(output_split, target_split))
+        ]
+
+        return data_losses
+
+    def _no_data_loss_wrapper(
+        self,
+        output_tilde: torch.Tensor = None,
+        weights: list = None,
+        target_data_tensor: torch.Tensor = None,
+        axis: int = -1,
+    ) -> torch.Tensor:
+        """It executes the evaluation of the data-driven mean-squared error without considering causality preserving
+
+        Args:
+            output_tilde (torch.Tensor): the output generated by
+                self.operator
+            weights (list): weights for rescaling each variable
+                outputted by self.operator
+            target_data_tensor (torch.Tensor): the target tensor to be
+                compared with output_tilde
+            axis (int): the axis in which the variables are split
+
+        Returns:
+            torch.Tensor: the loss function value for a given state
+        """
+
+        return self.data_loss(
+            output_tilde=output_tilde,
+            weights=weights,
+            target_data_tensor=target_data_tensor,
+            axis=axis,
+        )
+
+    def __call__(
+        self,
+        input_data: Union[dict, torch.Tensor] = None,
+        target_data: torch.Tensor = None,
+        call_back: str = "",
+        lambda_1: float = 0.0,
+        lambda_2: float = 0.0,
+        axis: int = -1,
+        relative: bool = False,
+        device: str = "cpu",
+        weights: list = None,
+        use_mean: bool = True,
+    ) -> Callable:
+        """Main function for generating complete loss function workflow
+
+        Args:
+            input_data (Union[dict, torch.Tensor]): the data used as
+                input for self.operator
+            target_data (torch.Tensor): the target data used for
+                training self.operator
+            call_back (str): a string used for composing the logging of
+                the optimization process
+            lambda_1 (float): the penalty for the L^1  regularization
+                term
+            lambda_2 (float): the penalty for the L^2  regularization
+                term
+            device (str): the device in which the loss evaluation will
+                be executed, 'cpu' or 'gpu'
+            weights (list): a list of weights for rescaling each
+                variable outputted by self.operator
+            use_mean (bool): use mean for evaluating the losses or not
+                (the alternative is sum)
+
+        Returns:
+            Callable: the closure function used for evaluating the loss
+            value
+        """
+
+        self.data_loss = self._data_loss
+
+        l1_reg_multiplication = self._exec_multiplication_in_regularization(
+            lambda_type=type(lambda_1), term_type=type(self.operator.weights_l1)
+        )
+
+        l2_reg_multiplication = self._exec_multiplication_in_regularization(
+            lambda_type=type(lambda_2), term_type=type(self.operator.weights_l2)
+        )
+
+        # Using mean evaluation or not
+        if use_mean == True:
+            self.loss_evaluator = lambda res: torch.mean(torch.square((res)))
+        else:
+            self.loss_evaluator = lambda res: torch.sum(torch.square((res)))
+
+        # Relative norm or not
+        if relative == True:
+            if use_mean == True:
+                self.norm_evaluator = lambda ref: torch.mean(torch.square((ref)))
+            else:
+                self.norm_evaluator = lambda ref: torch.sum(torch.square((ref)))
+        else:
+            self.norm_evaluator = lambda ref: 1
+
+        self.data_loss_wrapper = self._no_data_loss_wrapper
+
+        def closure():
+            output_tilde = self.operator.forward(**input_data)
+
+            data_losses = self.data_loss_wrapper(
+                output_tilde=output_tilde,
+                weights=weights,
+                target_data_tensor=target_data,
+                axis=axis,
+            )
+
+            # L² and L¹ regularization term
+            weights_l2 = self.operator.weights_l2
+            weights_l1 = self.operator.weights_l1
+
+            # beta *||W||_2 + alpha * ||W||_1
+            l2_reg = l2_reg_multiplication(lambda_2, weights_l2)
+            l1_reg = l1_reg_multiplication(lambda_1, weights_l1)
+
+            # Loss = ||Ũ_t - U_t||_2  +
+            #         lambda_2 * ||W||_2 + lambda_1 * ||W||_1
+            loss = sum(data_losses) + l2_reg + l1_reg
+
+            # Back-propagation
+            loss.backward()
+
+            self.loss_states["loss"].append(float(loss.detach().data))
+
+            sys.stdout.write(("\rloss: {} {}").format(loss, call_back))
+            sys.stdout.flush()
+
+        return closure
+
+

__call__(input_data=None, target_data=None, call_back='', lambda_1=0.0, lambda_2=0.0, axis=-1, relative=False, device='cpu', weights=None, use_mean=True) #


Main function for generating complete loss function workflow

+ + + +

Parameters:

Name          Type                   Description                                                                 Default
input_data    Union[dict, Tensor]    the data used as input for self.operator                                    None
target_data   Tensor                 the target data used for training self.operator                            None
call_back     str                    a string used for composing the logging of the optimization process        ''
lambda_1      float                  the penalty for the L^1 regularization term                                0.0
lambda_2      float                  the penalty for the L^2 regularization term                                0.0
axis          int                    the axis along which the output variables are split                        -1
relative      bool                   whether to use a relative norm for the data loss                           False
device        str                    the device in which the loss evaluation will be executed, 'cpu' or 'gpu'   'cpu'
weights       list                   a list of weights for rescaling each variable output by self.operator      None
use_mean      bool                   use mean for evaluating the losses or not (the alternative is sum)         True

Returns:

Name        Type        Description
Callable    Callable    the closure function used for evaluating the loss value
Source code in simulai/optimization/_losses.py, lines 366-463:
def __call__(
+    self,
+    input_data: Union[dict, torch.Tensor] = None,
+    target_data: torch.Tensor = None,
+    call_back: str = "",
+    lambda_1: float = 0.0,
+    lambda_2: float = 0.0,
+    axis: int = -1,
+    relative: bool = False,
+    device: str = "cpu",
+    weights: list = None,
+    use_mean: bool = True,
+) -> Callable:
+    """Main function for generating complete loss function workflow
+
+    Args:
+        input_data (Union[dict, torch.Tensor]): the data used as
+            input for self.operator
+        target_data (torch.Tensor): the target data used for
+            training self.operator
+        call_back (str): a string used for composing the logging of
+            the optimization process
+        lambda_1 (float): the penalty for the L^1  regularization
+            term
+        lambda_2 (float): the penalty for the L^2  regularization
+            term
+        device (str): the device in which the loss evaluation will
+            be executed, 'cpu' or 'gpu'
+        weights (list): a list of weights for rescaling each
+            variable outputted by self.operator
+        use_mean (bool): use mean for evaluating the losses or not
+            (the alternative is sum)
+
+    Returns:
+        Callable: the closure function used for evaluating the loss
+        value
+    """
+
+    self.data_loss = self._data_loss
+
+    l1_reg_multiplication = self._exec_multiplication_in_regularization(
+        lambda_type=type(lambda_1), term_type=type(self.operator.weights_l1)
+    )
+
+    l2_reg_multiplication = self._exec_multiplication_in_regularization(
+        lambda_type=type(lambda_2), term_type=type(self.operator.weights_l2)
+    )
+
+    # Using mean evaluation or not
+    if use_mean == True:
+        self.loss_evaluator = lambda res: torch.mean(torch.square((res)))
+    else:
+        self.loss_evaluator = lambda res: torch.sum(torch.square((res)))
+
+    # Relative norm or not
+    if relative == True:
+        if use_mean == True:
+            self.norm_evaluator = lambda ref: torch.mean(torch.square((ref)))
+        else:
+            self.norm_evaluator = lambda ref: torch.sum(torch.square((ref)))
+    else:
+        self.norm_evaluator = lambda ref: 1
+
+    self.data_loss_wrapper = self._no_data_loss_wrapper
+
+    def closure():
+        output_tilde = self.operator.forward(**input_data)
+
+        data_losses = self.data_loss_wrapper(
+            output_tilde=output_tilde,
+            weights=weights,
+            target_data_tensor=target_data,
+            axis=axis,
+        )
+
+        # L² and L¹ regularization term
+        weights_l2 = self.operator.weights_l2
+        weights_l1 = self.operator.weights_l1
+
+        # beta *||W||_2 + alpha * ||W||_1
+        l2_reg = l2_reg_multiplication(lambda_2, weights_l2)
+        l1_reg = l1_reg_multiplication(lambda_1, weights_l1)
+
+        # Loss = ||Ũ_t - U_t||_2  +
+        #         lambda_2 * ||W||_2 + lambda_1 * ||W||_1
+        loss = sum(data_losses) + l2_reg + l1_reg
+
+        # Back-propagation
+        loss.backward()
+
+        self.loss_states["loss"].append(float(loss.detach().data))
+
+        sys.stdout.write(("\rloss: {} {}").format(loss, call_back))
+        sys.stdout.flush()
+
+    return closure
+
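With use_mean=True and relative=True, the weighted, per-variable data term assembled by WRMSELoss reduces to the computation sketched below in plain torch; the tensor shapes and weight values are illustrative.

import torch

output_tilde = torch.randn(32, 3)  # approximation produced by the operator, 3 output variables
target = torch.randn(32, 3)        # reference data
weights = [1.0, 0.5, 2.0]          # one rescaling weight per output variable

# Split the variables along the last axis (split size 1), as done by _data_loss
out_split = torch.split(output_tilde, 1, dim=-1)
tgt_split = torch.split(target, 1, dim=-1)

data_losses = [
    w * torch.mean(torch.square(o - t)) / torch.mean(torch.square(t))
    for w, o, t in zip(weights, out_split, tgt_split)
]
loss = sum(data_losses)  # the L^1/L^2 regularization terms would be added here
print(float(loss))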
+
+

__init__(operator=None) #


Weighted mean-squared error loss function

+ + + +

Parameters:

Name        Type      Description                                                                      Default
operator    Module    the operator used for evaluating the loss function (usually a neural network)   None
Source code in simulai/optimization/_losses.py, lines 283-302:
def __init__(self, operator=None):
+    """Weighted mean-squared error loss function
+
+    Args:
+        operator (torch.nn.Module): the operator used for evaluating
+            the loss function (usually a neural network)
+    """
+
+    super().__init__()
+
+    self.operator = operator
+    self.split_dim = 1
+    self.tol = 1e-25
+
+    self.loss_evaluator = None
+    self.norm_evaluator = None
+
+    self.axis_loss_evaluator = lambda res: torch.mean(torch.square((res)), dim=1)
+
+    self.loss_states = {"loss": list()}
+
+
+
+ +
+ + + +
+ +
+ +

PIRMSELoss#

+ + +
+ + + + +
+

+ Bases: LossBasics

+ + +
Source code in simulai/optimization/_losses.py, lines 467-1021:
class PIRMSELoss(LossBasics):
+    def __init__(self, operator: torch.nn.Module = None) -> None:
+        """Physics-Informed mean-squared error loss function
+
+        Args:
+            operator (torch.nn.Module): the operator used for evaluating
+                the loss function (usually a neural network)
+        """
+
+        super().__init__()
+
+        self.split_dim = 1
+        self.operator = operator
+        self.loss_evaluator = None
+        self.residual = None
+        self.tol = 1e-15
+        self.device = None
+
+        self.axis_loss_evaluator = lambda res: torch.mean(torch.square((res)), dim=1)
+
+        self.loss_states = {
+            "pde": list(),
+            "init": list(),
+            "bound": list(),
+            "extra_data": list(),
+        }
+        self.loss_tags = list(self.loss_states.keys())
+        self.hybrid_data_pinn = False
+
+        self.losses_terms_indices = {
+            "pde": 0,
+            "init": 1,
+            "bound": 2,
+            "extra_data": 3,
+            "causality_weights": 4,
+        }
+
+    def _convert(
+        self, input_data: Union[dict, np.ndarray] = None, device: str = None
+    ) -> Union[dict, torch.Tensor]:
+        """It converts a dataset to the proper format (torch.Tensor) and sends it to
+        the chosen execution device ('gpu' or 'cpu')
+
+        Args:
+            input_data (Union[dict, np.ndarray]): the data structure to
+                be converted
+            device: the device in which the converted dataset must be
+                placed
+
+        Returns:
+            Union[dict, torch.Tensor]: the converted data structure
+        """
+
+        if type(input_data) == dict:
+            return {
+                key: torch.from_numpy(item.astype(ARRAY_DTYPE)).to(device)
+                for key, item in input_data.items()
+            }
+
+        else:
+            return torch.from_numpy(input_data.astype(ARRAY_DTYPE)).to(device)
+
+    def _to_tensor(self, *args, device: str = "cpu") -> List[torch.Tensor]:
+        """It converts an arbitrary number of arrays to tensors
+
+        Args:
+            *args: the arrays to be converted
+            device: the device on which the converted tensors must be
+                placed
+
+        Returns:
+            List[torch.Tensor]: a list of tensors
+        """
+        return [self._convert(input_data=arg, device=device) for arg in args]
+
+    def _data_loss(
+        self,
+        output_tilde: torch.Tensor = None,
+        target_data_tensor: torch.Tensor = None,
+        weights: List[float] = None,
+    ) -> torch.Tensor:
+        """It executes the evaluation of the data-driven mean-squared error
+
+        Args:
+            output_tilde (torch.Tensor): the output generated by
+                self.operator
+            target_data_tensor (torch.Tensor): the target tensor to be
+                compared with output_tilde
+
+        Returns:
+            torch.Tensor: the loss function value
+        """
+
+        output_split = torch.split(output_tilde, self.split_dim, dim=-1)
+        target_split = torch.split(target_data_tensor, self.split_dim, dim=-1)
+
+        data_losses = [
+            self.loss_evaluator_data((out_split, tgt_split))
+            / (self.norm_evaluator(tgt_split) or torch.tensor(1.0).to(self.device))
+            for i, (out_split, tgt_split) in enumerate(zip(output_split, target_split))
+        ]
+
+        return self.weighted_loss_evaluator(data_losses, weights)
+
+    def _data_loss_adaptive(
+        self,
+        output_tilde: torch.Tensor = None,
+        target_data_tensor: torch.Tensor = None,
+        **kwargs,
+    ) -> torch.Tensor:
+        """It executes the evaluation of the data-driven mean-squared error
+
+        Args:
+            output_tilde (torch.Tensor): the output generated by
+                self.operator
+            target_data_tensor (torch.Tensor): the target tensor to be
+                compared with output_tilde
+
+        Returns:
+            torch.Tensor: the loss function value
+        """
+
+        output_split = torch.split(output_tilde, self.split_dim, dim=-1)
+        target_split = torch.split(target_data_tensor, self.split_dim, dim=-1)
+
+        data_discrepancy = [
+            out_split - tgt_split
+            for i, (out_split, tgt_split) in enumerate(zip(output_split, target_split))
+        ]
+
+        weights = self.data_weights_estimator(
+            residual=data_discrepancy,
+            loss_evaluator=self.loss_evaluator,
+            loss_history=self.loss_states,
+            operator=self.operator,
+        )
+
+        data_losses = [
+            weights[i] * self.loss_evaluator_data((out_split, tgt_split))
+            for i, (out_split, tgt_split) in enumerate(zip(output_split, target_split))
+        ]
+
+        return [sum(data_losses)]
+
+    def _global_weights_bypass(
+        self, initial_penalty: float = None, **kwargs
+    ) -> List[float]:
+        return [1.0, initial_penalty, 1.0, 1.0]
+
+    def _global_weights_estimator(self, **kwargs) -> List[float]:
+        weights = self.global_weights_estimator(**kwargs)
+
+        return weights
+
+    def _residual_loss(
+        self, residual_approximation: List[torch.Tensor] = None, weights: list = None
+    ) -> List[torch.Tensor]:
+        """It evaluates the physics-driven residual loss
+
+        Args:
+            residual_approximation (List[torch.Tensor]): a list of
+                tensors containing the evaluation for the physical
+                residual for each sample in the dataset
+            weights (list): a list of weights used for rescaling the
+                residuals of each variable
+
+        Returns:
+            torch.Tensor: the list of residual losses
+        """
+        residual_losses = [self.loss_evaluator(res) for res in residual_approximation]
+
+        return self.weighted_loss_evaluator(residual_losses, weights)
+
+    def _residual_loss_adaptive(
+        self, residual_approximation: List[torch.Tensor] = None, weights: list = None
+    ) -> List[torch.Tensor]:
+        """It evaluates the physics-driven residual loss
+
+        Args:
+            residual_approximation (List[torch.Tensor]): a list of
+                tensors containing the evaluation for the physical
+                residual for each sample in the dataset
+            weights (list): a list of weights used for rescaling the
+                residuals of each variable
+
+        Returns:
+            torch.Tensor: the list of residual losses
+        """
+
+        weights = self.residual_weights_estimator(
+            residual=residual_approximation,
+            loss_evaluator=self.loss_evaluator,
+            loss_history=self.loss_states,
+            operator=self.operator,
+        )
+
+        residual_loss = [
+            weight * self.loss_evaluator(res)
+            for weight, res in zip(weights, residual_approximation)
+        ]
+
+        return [sum(residual_loss)]
+
+    def _extra_data(
+        self, input_data: torch.Tensor = None, target_data: torch.Tensor = None
+    ) -> torch.Tensor:
+        # Evaluating data for the initial condition
+        output_tilde = self.operator(input_data=input_data)
+
+        # Evaluating loss approximation for extra data
+        data_loss = self._data_loss(
+            output_tilde=output_tilde, target_data_tensor=target_data
+        )
+
+        return data_loss
+
+    def _boundary_penalisation(
+        self, boundary_input: dict = None, residual: SymbolicOperator = None
+    ) -> List[torch.Tensor]:
+        """It applies the boundary conditions
+
+        Args:
+            boundary_input (dict): a dictionary containing the
+                coordinates of the boundaries
+            residual (SymbolicOperator): a symbolic expression for the
+                boundary condition
+
+        Returns:
+            list: the evaluation of each boundary condition
+        """
+        return [
+            residual.eval_expression(k, boundary_input[k])
+            for k in boundary_input.keys()
+        ]
+
+    def _no_boundary_penalisation(
+        self, boundary_input: dict = None, residual: object = None
+    ) -> List[torch.Tensor]:
+        """It is used for cases in which no boundary condition is applied
+
+        """
+
+        return [torch.Tensor([0.0]).to(self.device) for k in boundary_input.keys()]
+
+    def _no_boundary(
+        self, boundary_input: dict = None, residual: object = None
+    ) -> List[torch.Tensor]:
+        """It is used for cases where there are no boundaries
+
+        """
+
+        return torch.Tensor([0.0]).to(self.device)
+
+    def _no_extra_data(
+        self, input_data: torch.Tensor = None, target_data: torch.Tensor = None
+    ) -> torch.Tensor:
+        return torch.Tensor([0.0]).to(self.device)
+
+    def _no_residual_wrapper(self, input_data: torch.Tensor = None) -> torch.Tensor:
+        return self.residual(input_data)
+
+    def _causality_preserving_residual_wrapper(
+        self, input_data: torch.Tensor = None
+    ) -> List:
+        return self.causality_preserving(self.residual(input_data))
+
+    def _filter_necessary_loss_terms(self, residual: SymbolicOperator = None):
+        tags = ["pde", "init"]
+        indices = [0, 1]
+
+        if residual.g_expressions:
+            tags.append("bound")
+            indices.append(2)
+        else:
+            pass
+
+        if self.hybrid_data_pinn:
+            tags.append("extra_data")
+            indices.append(3)
+        else:
+            pass
+
+        return tags, indices
+
+    def _losses_states_str(self, tags: List[str] = None):
+        losses_str = "\r"
+        for item in tags:
+            losses_str += f"{item}:{{}} "
+
+        return losses_str
+
+    def __call__(
+        self,
+        input_data: Union[dict, torch.Tensor] = None,
+        target_data: Union[dict, torch.Tensor] = None,
+        verbose: bool = False,
+        call_back: str = "",
+        residual: Callable = None,
+        initial_input: Union[dict, torch.Tensor] = None,
+        initial_state: Union[dict, torch.Tensor] = None,
+        boundary_input: dict = None,
+        boundary_penalties: list = [1],
+        extra_input_data: Union[dict, torch.Tensor] = None,
+        extra_target_data: Union[dict, torch.Tensor] = None,
+        initial_penalty: float = 1,
+        axis: int = -1,
+        relative: bool = False,
+        lambda_1: float = 0.0,
+        lambda_2: float = 0.0,
+        weights=None,
+        weights_residual=None,
+        device: str = "cpu",
+        split_losses: bool = False,
+        causality_preserving: Callable = None,
+        global_weights_estimator: Callable = None,
+        residual_weights_estimator: Callable = None,
+        data_weights_estimator: Callable = None,
+        use_mean: bool = True,
+        use_data_log: bool = False,
+    ) -> Callable:
+        self.residual = residual
+
+        self.device = device
+
+        self.causality_preserving = causality_preserving
+
+        # Handling exception when AnnealingWeights and split_losses
+        # are used together.
+        if isinstance(global_weights_estimator, AnnealingWeights):
+            if split_losses:
+                raise RuntimeError(
+                    "Global weights estimator, AnnealingWeights, is not "
+                    + "compatible with split loss terms."
+                )
+            else:
+                pass
+
+        self.global_weights_estimator = global_weights_estimator
+
+        self.residual_weights_estimator = residual_weights_estimator
+
+        self.data_weights_estimator = data_weights_estimator
+
+        if split_losses:
+            self.weighted_loss_evaluator = self._bypass_weighted_loss
+        else:
+            self.weighted_loss_evaluator = self._eval_weighted_loss
+
+        if (
+            isinstance(extra_input_data, np.ndarray)
+            == isinstance(extra_target_data, np.ndarray)
+            == True
+        ):
+            self.hybrid_data_pinn = True
+        else:
+            pass
+
+        # When no weight is provided, they are
+        # set to the default choice
+        if weights is None:
+            weights = len(residual.output_names) * [1]
+
+        if weights_residual is None:
+            weights_residual = len(residual.output_names) * [1]
+
+        loss_tags, loss_indices = self._filter_necessary_loss_terms(residual=residual)
+        loss_str = self._losses_states_str(tags=loss_tags)
+
+        # Boundary conditions are optional, since they are not
+        # defined in some cases, as ODE, for example.
+        if residual.g_expressions:
+            boundary = self._boundary_penalisation
+        else:
+            if boundary_input == None:
+                boundary = self._no_boundary
+            else:
+                boundary = self._no_boundary_penalisation
+
+        if self.causality_preserving:
+            call_back = f", causality_weights: {self.causality_preserving.call_back}"
+            self.residual_wrapper = self._causality_preserving_residual_wrapper
+
+        else:
+            self.residual_wrapper = self._no_residual_wrapper
+
+        l1_reg_multiplication = self._exec_multiplication_in_regularization(
+            lambda_type=type(lambda_1), term_type=type(self.operator.weights_l1)
+        )
+
+        l2_reg_multiplication = self._exec_multiplication_in_regularization(
+            lambda_type=type(lambda_2), term_type=type(self.operator.weights_l2)
+        )
+        if type(input_data) is dict:
+            try:
+                input_data = input_data["input_data"]
+            except Exception:
+                pass
+
+        initial_input, initial_state = self._to_tensor(
+            initial_input, initial_state, device=device
+        )
+
+        # Preparing extra data, when necessary
+        if self.hybrid_data_pinn:
+            extra_input_data, extra_target_data = self._to_tensor(
+                extra_input_data, extra_target_data, device=device
+            )
+            self.extra_data = self._extra_data
+        else:
+            self.extra_data = self._no_extra_data
+
+        if use_data_log == True:
+            self.inner_square = self._two_term_log_loss
+        else:
+            self.inner_square = self._two_term_loss
+
+        if use_mean == True:
+            self.loss_evaluator = lambda res: torch.mean(self._single_term_loss(res))
+        else:
+            self.loss_evaluator = lambda res: torch.sum(self._single_term_loss(res))
+
+        if use_mean == True:
+            self.loss_evaluator_data = lambda res: torch.mean(self.inner_square(*res))
+        else:
+            self.loss_evaluator_data = lambda res: torch.sum(self.inner_square(*res))
+
+        # Relative norm or not
+        if relative == True:
+            if use_mean == True:
+                self.norm_evaluator = lambda ref: torch.mean(torch.square((ref)))
+            else:
+                self.norm_evaluator = lambda ref: torch.sum(torch.square((ref)))
+        else:
+            self.norm_evaluator = lambda ref: 1
+
+        # Determining the usage of special residual loss weighting
+        if residual_weights_estimator:
+            self.residual_loss = self._residual_loss_adaptive
+        else:
+            self.residual_loss = self._residual_loss
+
+        # Determining the usage of special data loss weighting
+        if data_weights_estimator:
+            self.data_loss = self._data_loss_adaptive
+        else:
+            self.data_loss = self._data_loss
+
+        # Determining the usage of special global loss weighting
+        if global_weights_estimator:
+            self.global_weights = self._global_weights_estimator
+        else:
+            self.global_weights = self._global_weights_bypass
+
+        if verbose:
+            self.pprint = self._pprint_verbose
+        else:
+            self.pprint = self._pprint_simple
+
+        def closure():
+            # Executing the symbolic residual evaluation
+            residual_approximation = self.residual_wrapper(input_data)
+
+            # Boundary, if applicable
+            boundary_approximation = boundary(
+                boundary_input=boundary_input, residual=residual
+            )
+
+            # Evaluating data for the initial condition
+            initial_output_tilde = self.operator(input_data=initial_input)
+
+            # Evaluating loss function for residual
+            residual_loss = self.residual_loss(
+                residual_approximation=residual_approximation, weights=weights_residual
+            )
+
+            # Evaluating loss for the boundary approximation, if applicable
+            boundary_loss = self._residual_loss(
+                residual_approximation=boundary_approximation,
+                weights=boundary_penalties,
+            )
+
+            # Evaluating loss approximation for initial condition
+            initial_data_loss = self.data_loss(
+                output_tilde=initial_output_tilde,
+                target_data_tensor=initial_state,
+                weights=weights,
+            )
+
+            # Evaluating extra data loss, when applicable
+            extra_data = self.extra_data(
+                input_data=extra_input_data, target_data=extra_target_data
+            )
+
+            # L² and L¹ regularization term
+            weights_l2 = self.operator.weights_l2
+            weights_l1 = self.operator.weights_l1
+
+            # beta *||W||_2 + alpha * ||W||_1
+            l2_reg = l2_reg_multiplication(lambda_2, weights_l2)
+            l1_reg = l1_reg_multiplication(lambda_1, weights_l1)
+
+            # The complete loss function
+            pde = residual_loss
+            init = initial_data_loss
+            bound = boundary_loss
+
+            loss_terms = self._aggregate_terms(*pde, *init, *bound, *extra_data)
+
+            # Updating the loss weights if necessary
+            loss_weights = self.global_weights(
+                initial_penalty=initial_penalty,
+                operator=self.operator,
+                loss_evaluator=self.loss_evaluator,
+                residual=loss_terms,
+            )
+            # Overall loss function
+            loss = (
+                sum(self._eval_weighted_loss(loss_terms, loss_weights))
+                + l2_reg
+                + l1_reg
+            )
+
+            # Back-propagation
+            loss.backward()
+
+            pde_detach = float(sum(pde).detach().data)
+            init_detach = float(sum(init).detach().data)
+            bound_detach = float(sum(bound).detach().data)
+            extra_data_detach = float(sum(extra_data).detach().data)
+
+            self.loss_states["pde"].append(pde_detach)
+            self.loss_states["init"].append(init_detach)
+            self.loss_states["bound"].append(bound_detach)
+            self.loss_states["extra_data"].append(extra_data_detach)
+
+            losses_list = np.array(
+                [pde_detach, init_detach, bound_detach, extra_data_detach]
+            )
+
+            self.pprint(
+                loss_str=loss_str,
+                losses_list=losses_list,
+                call_back=call_back,
+                loss_indices=loss_indices,
+                loss_terms=loss_terms,
+                loss_weights=loss_weights,
+            )
+
+            _current_loss = loss
+
+            return _current_loss
+
+        return closure
+
+
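Schematically, the closure assembled by PIRMSELoss aggregates the PDE-residual, initial-condition, boundary and extra-data terms and applies global weights before back-propagation. The snippet below spells out that composition with illustrative numbers, using the default weights produced by _global_weights_bypass, i.e. [1.0, initial_penalty, 1.0, 1.0].

import torch

pde_loss = torch.tensor(0.12)    # residual of the governing equations
init_loss = torch.tensor(0.05)   # mismatch at the initial condition
bound_loss = torch.tensor(0.02)  # mismatch at the boundary conditions
extra_loss = torch.tensor(0.0)   # optional labelled data (hybrid data + physics-informed training)

initial_penalty = 100.0
loss_weights = [1.0, initial_penalty, 1.0, 1.0]

loss = sum(w * term for w, term in zip(loss_weights,
                                       [pde_loss, init_loss, bound_loss, extra_loss]))
print(float(loss))  # ~5.14; the L^1/L^2 regularization terms would be added on top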
+ + + +

__init__(operator=None) #


Physics-Informed mean-squared error loss function

+ + + +

Parameters:

Name        Type      Description                                                                      Default
operator    Module    the operator used for evaluating the loss function (usually a neural network)   None
Source code in simulai/optimization/_losses.py, lines 468-502:
def __init__(self, operator: torch.nn.Module = None) -> None:
+    """Physics-Informed mean-squared error loss function
+
+    Args:
+        operator (torch.nn.Module): the operator used for evaluating
+            the loss function (usually a neural network)
+    """
+
+    super().__init__()
+
+    self.split_dim = 1
+    self.operator = operator
+    self.loss_evaluator = None
+    self.residual = None
+    self.tol = 1e-15
+    self.device = None
+
+    self.axis_loss_evaluator = lambda res: torch.mean(torch.square((res)), dim=1)
+
+    self.loss_states = {
+        "pde": list(),
+        "init": list(),
+        "bound": list(),
+        "extra_data": list(),
+    }
+    self.loss_tags = list(self.loss_states.keys())
+    self.hybrid_data_pinn = False
+
+    self.losses_terms_indices = {
+        "pde": 0,
+        "init": 1,
+        "bound": 2,
+        "extra_data": 3,
+        "causality_weights": 4,
+    }
+
+
+
+ +
+ + + +
+ +
+ +

+
+
+ +
+ + + + + + + + + + + + +
+ + + + + + + + + + \ No newline at end of file diff --git a/simulai_optimization/simulai_optimizer/index.html b/simulai_optimization/simulai_optimizer/index.html index f1746f7..6cc5d4d 100644 --- a/simulai_optimization/simulai_optimizer/index.html +++ b/simulai_optimization/simulai_optimizer/index.html @@ -1,7 +1,7 @@ - Optimizer - SimulAI's Documentation + Optimization Interfaces - SimulAI's Documentation @@ -244,7 +244,26 @@ @@ -332,11 +351,11 @@
-

Optimizer

+

Optimization Interfaces

-

Optimizer#

-

Optimizer#

+

Optimization Interfaces#

+

Optimizer#

@@ -3914,7 +3933,7 @@

diff --git a/simulai_parallel/index.html b/simulai_parallel/index.html index 6aea145..13fc880 100644 --- a/simulai_parallel/index.html +++ b/simulai_parallel/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_regression/simulai_dense/index.html b/simulai_regression/simulai_dense/index.html index 81daaf1..e685fc9 100644 --- a/simulai_regression/simulai_dense/index.html +++ b/simulai_regression/simulai_dense/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_regression/simulai_opinf/index.html b/simulai_regression/simulai_opinf/index.html index 1743f48..b12f1c0 100644 --- a/simulai_regression/simulai_opinf/index.html +++ b/simulai_regression/simulai_opinf/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_residuals/index.html b/simulai_residuals/index.html index 53e8b3d..5774b51 100644 --- a/simulai_residuals/index.html +++ b/simulai_residuals/index.html @@ -244,7 +244,26 @@ diff --git a/simulai_rom/simulai_rom/index.html b/simulai_rom/simulai_rom/index.html index 9afac35..4a9e512 100644 --- a/simulai_rom/simulai_rom/index.html +++ b/simulai_rom/simulai_rom/index.html @@ -244,7 +244,26 @@ diff --git a/sitemap.xml.gz b/sitemap.xml.gz index 42bf226881023c3e69dc4dd80d5ad904e2aa7740..995c512f1e95d7c7cc7d1c901d35cf1cffb81ba8 100644 GIT binary patch delta 12 Tcmb=gXOr*d;7HJ)$W{pe7F+{f delta 12 Tcmb=gXOr*d;NVc6$W{pe6m$am