diff --git a/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip b/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip index a5c7a48..a16f398 100644 Binary files a/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip and b/dev/_downloads/07fcc19ba03226cd3d83d4e40ec44385/auto_examples_python.zip differ diff --git a/dev/_downloads/082e73328a5caf8c1fe9ad7fe05cf68f/plot_incremental_FNO_darcy.ipynb b/dev/_downloads/082e73328a5caf8c1fe9ad7fe05cf68f/plot_incremental_FNO_darcy.ipynb index eee271e..57d378a 100644 --- a/dev/_downloads/082e73328a5caf8c1fe9ad7fe05cf68f/plot_incremental_FNO_darcy.ipynb +++ b/dev/_downloads/082e73328a5caf8c1fe9ad7fe05cf68f/plot_incremental_FNO_darcy.ipynb @@ -197,7 +197,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/0ac9f102e4f1903984e3b4a7d517c88f/plot_darcy_flow.ipynb b/dev/_downloads/0ac9f102e4f1903984e3b4a7d517c88f/plot_darcy_flow.ipynb index 7c1f97f..356d7d7 100644 --- a/dev/_downloads/0ac9f102e4f1903984e3b4a7d517c88f/plot_darcy_flow.ipynb +++ b/dev/_downloads/0ac9f102e4f1903984e3b4a7d517c88f/plot_darcy_flow.ipynb @@ -78,7 +78,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/0d78e075dd52a34e158d7f5f710dfe89/plot_incremental_FNO_darcy.zip b/dev/_downloads/0d78e075dd52a34e158d7f5f710dfe89/plot_incremental_FNO_darcy.zip new file mode 100644 index 0000000..3aca227 Binary files /dev/null and b/dev/_downloads/0d78e075dd52a34e158d7f5f710dfe89/plot_incremental_FNO_darcy.zip differ diff --git a/dev/_downloads/1a3050d57a180b92b424ce128dfe1d36/plot_FNO_darcy.py b/dev/_downloads/1a3050d57a180b92b424ce128dfe1d36/plot_FNO_darcy.py index bccb871..780f857 100644 --- a/dev/_downloads/1a3050d57a180b92b424ce128dfe1d36/plot_FNO_darcy.py +++ b/dev/_downloads/1a3050d57a180b92b424ce128dfe1d36/plot_FNO_darcy.py @@ -1,18 +1,19 @@ """ -Training an FNO on Darcy-Flow +Training a TFNO on Darcy-Flow ============================= In this example, we demonstrate how to use the small Darcy-Flow example we ship with the package -to train a Fourier-Neural Operator +to train a Tensorized Fourier-Neural Operator """ # %% # + import torch import matplotlib.pyplot as plt import sys -from neuralop.models import FNO +from neuralop.models import TFNO from neuralop import Trainer from neuralop.training import AdamW from neuralop.data.datasets import load_darcy_flow_small @@ -33,13 +34,9 @@ # %% -# We create a simple FNO model +# We create a tensorized FNO model -model = FNO(n_modes=(16, 16), - in_channels=1, - out_channels=1, - hidden_channels=32, - projection_channels=64) +model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42) model = model.to(device) n_params = count_model_params(model) diff --git a/dev/_downloads/20c43dd37baf603889c4dc23e93bdb60/plot_count_flops.zip b/dev/_downloads/20c43dd37baf603889c4dc23e93bdb60/plot_count_flops.zip new file mode 100644 index 0000000..ce3a659 Binary files /dev/null and b/dev/_downloads/20c43dd37baf603889c4dc23e93bdb60/plot_count_flops.zip differ diff --git a/dev/_downloads/2a3ecbdce9fd535c53d44cc373f6a228/checkpoint_FNO_darcy.py b/dev/_downloads/2a3ecbdce9fd535c53d44cc373f6a228/checkpoint_FNO_darcy.py index 44c1797..5665d56 100644 --- 
a/dev/_downloads/2a3ecbdce9fd535c53d44cc373f6a228/checkpoint_FNO_darcy.py +++ b/dev/_downloads/2a3ecbdce9fd535c53d44cc373f6a228/checkpoint_FNO_darcy.py @@ -33,14 +33,7 @@ # %% # We create a tensorized FNO model -model = TFNO(n_modes=(16, 16), - in_channels=1, - out_channels=1, - hidden_channels=32, - projection_channels=64, - factorization='tucker', - rank=0.42) - +model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42) model = model.to(device) n_params = count_model_params(model) diff --git a/dev/_downloads/3864a2d85c7ce11adeac9580559229ab/plot_darcy_flow.zip b/dev/_downloads/3864a2d85c7ce11adeac9580559229ab/plot_darcy_flow.zip new file mode 100644 index 0000000..655bdf3 Binary files /dev/null and b/dev/_downloads/3864a2d85c7ce11adeac9580559229ab/plot_darcy_flow.zip differ diff --git a/dev/_downloads/3faf9d2eaee5cc8e9f1c631c002ce544/plot_darcy_flow_spectrum.zip b/dev/_downloads/3faf9d2eaee5cc8e9f1c631c002ce544/plot_darcy_flow_spectrum.zip new file mode 100644 index 0000000..ec544d4 Binary files /dev/null and b/dev/_downloads/3faf9d2eaee5cc8e9f1c631c002ce544/plot_darcy_flow_spectrum.zip differ diff --git a/dev/_downloads/52640fe09fbb5b08e5a2370e57b3b066/checkpoint_FNO_darcy.ipynb b/dev/_downloads/52640fe09fbb5b08e5a2370e57b3b066/checkpoint_FNO_darcy.ipynb index bf97b3d..38b4850 100644 --- a/dev/_downloads/52640fe09fbb5b08e5a2370e57b3b066/checkpoint_FNO_darcy.ipynb +++ b/dev/_downloads/52640fe09fbb5b08e5a2370e57b3b066/checkpoint_FNO_darcy.ipynb @@ -51,7 +51,7 @@ }, "outputs": [], "source": [ - "model = TFNO(n_modes=(16, 16),\n in_channels=1, \n out_channels=1, \n hidden_channels=32, \n projection_channels=64, \n factorization='tucker', \n rank=0.42)\n\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" + "model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" ] }, { @@ -154,7 +154,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/5e60095ce99919773daa83384f767e02/plot_SFNO_swe.zip b/dev/_downloads/5e60095ce99919773daa83384f767e02/plot_SFNO_swe.zip new file mode 100644 index 0000000..a7a7cb2 Binary files /dev/null and b/dev/_downloads/5e60095ce99919773daa83384f767e02/plot_SFNO_swe.zip differ diff --git a/dev/_downloads/645da00b8fbbb9bb5cae877fd0f31635/plot_FNO_darcy.zip b/dev/_downloads/645da00b8fbbb9bb5cae877fd0f31635/plot_FNO_darcy.zip new file mode 100644 index 0000000..918ad20 Binary files /dev/null and b/dev/_downloads/645da00b8fbbb9bb5cae877fd0f31635/plot_FNO_darcy.zip differ diff --git a/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip b/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip index 03cc577..84ff71c 100644 Binary files a/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip and b/dev/_downloads/6f1e7a639e0699d6164445b55e6c116d/auto_examples_jupyter.zip differ diff --git a/dev/_downloads/7296405f6df7c2cfe184e9b258cee33e/checkpoint_FNO_darcy.zip b/dev/_downloads/7296405f6df7c2cfe184e9b258cee33e/checkpoint_FNO_darcy.zip new file mode 100644 index 0000000..195154a Binary files /dev/null and 
b/dev/_downloads/7296405f6df7c2cfe184e9b258cee33e/checkpoint_FNO_darcy.zip differ diff --git a/dev/_downloads/84c435865e4e2910253a980881498782/plot_count_flops.ipynb b/dev/_downloads/84c435865e4e2910253a980881498782/plot_count_flops.ipynb index f599a65..3b0867c 100644 --- a/dev/_downloads/84c435865e4e2910253a980881498782/plot_count_flops.ipynb +++ b/dev/_downloads/84c435865e4e2910253a980881498782/plot_count_flops.ipynb @@ -15,7 +15,7 @@ }, "outputs": [], "source": [ - "from copy import deepcopy\nimport torch\nfrom torchtnt.utils.flops import FlopTensorDispatchMode\n\nfrom neuralop.models import FNO\n\ndevice = 'cpu'\n\nfno = FNO(n_modes=(64,64), \n in_channels=1, \n out_channels=1, \n hidden_channels=64, \n projection_channels=64)\n\nbatch_size = 4\nmodel_input = torch.randn(batch_size, 1, 128, 128)\n\n\nwith FlopTensorDispatchMode(fno) as ftdm:\n # count forward flops\n res = fno(model_input).mean()\n fno_forward_flops = deepcopy(ftdm.flop_counts)\n \n ftdm.reset()\n res.backward()\n fno_backward_flops = deepcopy(ftdm.flop_counts)" + "from copy import deepcopy\nimport torch\nfrom torchtnt.utils.flops import FlopTensorDispatchMode\n\nfrom neuralop.models import FNO\n\ndevice = 'cpu'\n\nfno = FNO(n_modes=(64,64), \n in_channels=3, \n out_channels=1, \n hidden_channels=64, \n projection_channels=64)\n\nbatch_size = 4\nmodel_input = torch.randn(batch_size, 3, 128, 128)\n\n\nwith FlopTensorDispatchMode(fno) as ftdm:\n # count forward flops\n res = fno(model_input).mean()\n fno_forward_flops = deepcopy(ftdm.flop_counts)\n \n ftdm.reset()\n res.backward()\n fno_backward_flops = deepcopy(ftdm.flop_counts)" ] }, { @@ -71,7 +71,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/969baa48c90c32befa7c7d2590c8ca42/darcy_data_processor.ipynb b/dev/_downloads/969baa48c90c32befa7c7d2590c8ca42/darcy_data_processor.ipynb deleted file mode 100644 index dec0ca4..0000000 --- a/dev/_downloads/969baa48c90c32befa7c7d2590c8ca42/darcy_data_processor.ipynb +++ /dev/null @@ -1,72 +0,0 @@ -{ - "cells": [ - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "\n# Data Processors\n\nIn this example, we demonstrate how to use neuralop.data.transforms.DataProcessor\nto preprocess and postprocess the small Darcy Flow example we ship with the package\nfor downstream use in training a neural operator model. 
\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import TFNO\nfrom neuralop import Trainer\nfrom neuralop.training import CheckpointCallback\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'\n\n\"\"\"\nFirst, let's load the small Darcy Flow dataset:\n\"\"\"" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Loading the Navier-Stokes dataset in 128x128 resolution\n\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "train_loader, test_loaders, data_processor = load_darcy_flow_small(\n n_train=1000, batch_size=32, \n test_resolutions=[16, 32], n_tests=[100, 50],\n test_batch_sizes=[32, 32],\n data_root=\"../neuralop/data/datasets/data/\"\n)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "collapsed": false - }, - "outputs": [], - "source": [ - "\"\"\"\nNext let's visualize the data in its raw form.\n\"\"\"" - ] - } - ], - "metadata": { - "kernelspec": { - "display_name": "Python 3", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.14" - } - }, - "nbformat": 4, - "nbformat_minor": 0 -} \ No newline at end of file diff --git a/dev/_downloads/af3a515d2684655c6a5a8e0df87a4cf9/plot_SFNO_swe.ipynb b/dev/_downloads/af3a515d2684655c6a5a8e0df87a4cf9/plot_SFNO_swe.ipynb index 678da75..e66e6b2 100644 --- a/dev/_downloads/af3a515d2684655c6a5a8e0df87a4cf9/plot_SFNO_swe.ipynb +++ b/dev/_downloads/af3a515d2684655c6a5a8e0df87a4cf9/plot_SFNO_swe.ipynb @@ -51,7 +51,7 @@ }, "outputs": [], "source": [ - "model = SFNO(n_modes=(32, 32),\n in_channels=3,\n out_channels=3,\n hidden_channels=32,\n projection_channels=64,\n factorization='dense')\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" + "model = SFNO(n_modes=(32, 32), in_channels=3, out_channels=3, hidden_channels=32, projection_channels=64, factorization='dense')\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" ] }, { @@ -172,7 +172,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/b84984d89c00aa83f7c56c96a61b8aee/darcy_data_processor.py b/dev/_downloads/b84984d89c00aa83f7c56c96a61b8aee/darcy_data_processor.py deleted file mode 100644 index 14cce33..0000000 --- a/dev/_downloads/b84984d89c00aa83f7c56c96a61b8aee/darcy_data_processor.py +++ /dev/null @@ -1,39 +0,0 @@ -""" -Data Processors -============================= - -In this example, we demonstrate how to use neuralop.data.transforms.DataProcessor -to preprocess and postprocess the small Darcy Flow example we ship with the package -for downstream use in training a neural operator model. 
-""" - -# %% -# -import torch -import matplotlib.pyplot as plt -import sys -from neuralop.models import TFNO -from neuralop import Trainer -from neuralop.training import CheckpointCallback -from neuralop.data.datasets import load_darcy_flow_small -from neuralop.utils import count_model_params -from neuralop import LpLoss, H1Loss - -device = 'cpu' - -""" -First, let's load the small Darcy Flow dataset: -""" -# %% -# Loading the Navier-Stokes dataset in 128x128 resolution -train_loader, test_loaders, data_processor = load_darcy_flow_small( - n_train=1000, batch_size=32, - test_resolutions=[16, 32], n_tests=[100, 50], - test_batch_sizes=[32, 32], - data_root="../neuralop/data/datasets/data/" -) - -# %% -""" -Next let's visualize the data in its raw form. -""" \ No newline at end of file diff --git a/dev/_downloads/be42c4c413e9b89016fa3a4984cb9758/plot_SFNO_swe.py b/dev/_downloads/be42c4c413e9b89016fa3a4984cb9758/plot_SFNO_swe.py index 64cd52c..cdafa52 100644 --- a/dev/_downloads/be42c4c413e9b89016fa3a4984cb9758/plot_SFNO_swe.py +++ b/dev/_downloads/be42c4c413e9b89016fa3a4984cb9758/plot_SFNO_swe.py @@ -31,12 +31,7 @@ # %% # We create a tensorized FNO model -model = SFNO(n_modes=(32, 32), - in_channels=3, - out_channels=3, - hidden_channels=32, - projection_channels=64, - factorization='dense') +model = SFNO(n_modes=(32, 32), in_channels=3, out_channels=3, hidden_channels=32, projection_channels=64, factorization='dense') model = model.to(device) n_params = count_model_params(model) diff --git a/dev/_downloads/c628421a5214dc8f1b3bde8be930715f/plot_darcy_flow_spectrum.ipynb b/dev/_downloads/c628421a5214dc8f1b3bde8be930715f/plot_darcy_flow_spectrum.ipynb index a422a26..057afa9 100644 --- a/dev/_downloads/c628421a5214dc8f1b3bde8be930715f/plot_darcy_flow_spectrum.ipynb +++ b/dev/_downloads/c628421a5214dc8f1b3bde8be930715f/plot_darcy_flow_spectrum.ipynb @@ -107,7 +107,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/cefc537c5730a6b3e916b83c1fd313d6/plot_UNO_darcy.zip b/dev/_downloads/cefc537c5730a6b3e916b83c1fd313d6/plot_UNO_darcy.zip new file mode 100644 index 0000000..2f9149d Binary files /dev/null and b/dev/_downloads/cefc537c5730a6b3e916b83c1fd313d6/plot_UNO_darcy.zip differ diff --git a/dev/_downloads/de69282d3144c5a2b675c6f6338237c1/plot_count_flops.py b/dev/_downloads/de69282d3144c5a2b675c6f6338237c1/plot_count_flops.py index 804156a..b8336de 100644 --- a/dev/_downloads/de69282d3144c5a2b675c6f6338237c1/plot_count_flops.py +++ b/dev/_downloads/de69282d3144c5a2b675c6f6338237c1/plot_count_flops.py @@ -19,13 +19,13 @@ device = 'cpu' fno = FNO(n_modes=(64,64), - in_channels=1, + in_channels=3, out_channels=1, hidden_channels=64, projection_channels=64) batch_size = 4 -model_input = torch.randn(batch_size, 1, 128, 128) +model_input = torch.randn(batch_size, 3, 128, 128) with FlopTensorDispatchMode(fno) as ftdm: diff --git a/dev/_downloads/e81e8f640b2a3cda84c542bbc8f36a54/plot_FNO_darcy.ipynb b/dev/_downloads/e81e8f640b2a3cda84c542bbc8f36a54/plot_FNO_darcy.ipynb index 4e68904..50ffcf5 100644 --- a/dev/_downloads/e81e8f640b2a3cda84c542bbc8f36a54/plot_FNO_darcy.ipynb +++ b/dev/_downloads/e81e8f640b2a3cda84c542bbc8f36a54/plot_FNO_darcy.ipynb @@ -4,7 +4,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "\n# Training an FNO on Darcy-Flow\n\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package\nto train a Fourier-Neural 
Operator\n" + "\n# Training a TFNO on Darcy-Flow\n\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package\nto train a Tensorized Fourier-Neural Operator\n" ] }, { @@ -15,7 +15,7 @@ }, "outputs": [], "source": [ - "import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import FNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'" + "import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import TFNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'" ] }, { @@ -40,7 +40,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "We create a simple FNO model\n\n" + "We create a tensorized FNO model\n\n" ] }, { @@ -51,7 +51,7 @@ }, "outputs": [], "source": [ - "model = FNO(n_modes=(16, 16),\n in_channels=1, \n out_channels=1,\n hidden_channels=32, \n projection_channels=64)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" + "model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" ] }, { @@ -172,7 +172,7 @@ "name": "python", "nbconvert_exporter": "python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/ecc34f03fb30bee651bda959149422c4/plot_UNO_darcy.ipynb b/dev/_downloads/ecc34f03fb30bee651bda959149422c4/plot_UNO_darcy.ipynb index 009da75..d004b54 100644 --- a/dev/_downloads/ecc34f03fb30bee651bda959149422c4/plot_UNO_darcy.ipynb +++ b/dev/_downloads/ecc34f03fb30bee651bda959149422c4/plot_UNO_darcy.ipynb @@ -33,7 +33,7 @@ }, "outputs": [], "source": [ - "train_loader, test_loaders, data_processor = load_darcy_flow_small(\n n_train=1000, batch_size=32, \n test_resolutions=[16, 32], n_tests=[100, 50],\n test_batch_sizes=[32, 32],\n)\n\nmodel = UNO(in_channels=1, \n out_channels=1, \n hidden_channels=64, \n projection_channels=64,\n uno_out_channels=[32,64,64,64,32],\n uno_n_modes=[[16,16],[8,8],[8,8],[8,8],[16,16]],\n uno_scalings=[[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]],\n horizontal_skips_map=None,\n channel_mlp_skip=\"linear\",\n n_layers = 5,\n domain_padding=0.2)\n\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" + "train_loader, test_loaders, data_processor = load_darcy_flow_small(\n n_train=1000, batch_size=32, \n test_resolutions=[16, 32], n_tests=[100, 50],\n test_batch_sizes=[32, 32],\n)\n\n\n\nmodel = UNO(in_channels=1, out_channels=1, hidden_channels=64, projection_channels=64,uno_out_channels = [32,64,64,64,32], \\\n uno_n_modes= [[16,16],[8,8],[8,8],[8,8],[16,16]], uno_scalings= [[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]],\\\n horizontal_skips_map = None, n_layers = 5, domain_padding = 0.2)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()" ] }, { @@ -154,7 +154,7 @@ "name": "python", "nbconvert_exporter": 
"python", "pygments_lexer": "ipython3", - "version": "3.10.14" + "version": "3.9.19" } }, "nbformat": 4, diff --git a/dev/_downloads/f9c1e50d48828a746410d5400feb61f7/plot_UNO_darcy.py b/dev/_downloads/f9c1e50d48828a746410d5400feb61f7/plot_UNO_darcy.py index f941551..a91cb27 100644 --- a/dev/_downloads/f9c1e50d48828a746410d5400feb61f7/plot_UNO_darcy.py +++ b/dev/_downloads/f9c1e50d48828a746410d5400feb61f7/plot_UNO_darcy.py @@ -31,18 +31,11 @@ test_batch_sizes=[32, 32], ) -model = UNO(in_channels=1, - out_channels=1, - hidden_channels=64, - projection_channels=64, - uno_out_channels=[32,64,64,64,32], - uno_n_modes=[[16,16],[8,8],[8,8],[8,8],[16,16]], - uno_scalings=[[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]], - horizontal_skips_map=None, - channel_mlp_skip="linear", - n_layers = 5, - domain_padding=0.2) + +model = UNO(in_channels=1, out_channels=1, hidden_channels=64, projection_channels=64,uno_out_channels = [32,64,64,64,32], \ + uno_n_modes= [[16,16],[8,8],[8,8],[8,8],[16,16]], uno_scalings= [[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]],\ + horizontal_skips_map = None, n_layers = 5, domain_padding = 0.2) model = model.to(device) n_params = count_model_params(model) diff --git a/dev/_images/sphx_glr_plot_FNO_darcy_001.png b/dev/_images/sphx_glr_plot_FNO_darcy_001.png index b2375ab..2b56d68 100644 Binary files a/dev/_images/sphx_glr_plot_FNO_darcy_001.png and b/dev/_images/sphx_glr_plot_FNO_darcy_001.png differ diff --git a/dev/_images/sphx_glr_plot_FNO_darcy_thumb.png b/dev/_images/sphx_glr_plot_FNO_darcy_thumb.png index 06497d0..6d50e89 100644 Binary files a/dev/_images/sphx_glr_plot_FNO_darcy_thumb.png and b/dev/_images/sphx_glr_plot_FNO_darcy_thumb.png differ diff --git a/dev/_images/sphx_glr_plot_SFNO_swe_001.png b/dev/_images/sphx_glr_plot_SFNO_swe_001.png index 80668fd..c4e7f0e 100644 Binary files a/dev/_images/sphx_glr_plot_SFNO_swe_001.png and b/dev/_images/sphx_glr_plot_SFNO_swe_001.png differ diff --git a/dev/_images/sphx_glr_plot_SFNO_swe_thumb.png b/dev/_images/sphx_glr_plot_SFNO_swe_thumb.png index 8225626..cb54f81 100644 Binary files a/dev/_images/sphx_glr_plot_SFNO_swe_thumb.png and b/dev/_images/sphx_glr_plot_SFNO_swe_thumb.png differ diff --git a/dev/_images/sphx_glr_plot_UNO_darcy_001.png b/dev/_images/sphx_glr_plot_UNO_darcy_001.png index 627b188..c8a2a58 100644 Binary files a/dev/_images/sphx_glr_plot_UNO_darcy_001.png and b/dev/_images/sphx_glr_plot_UNO_darcy_001.png differ diff --git a/dev/_images/sphx_glr_plot_UNO_darcy_thumb.png b/dev/_images/sphx_glr_plot_UNO_darcy_thumb.png index 13a6cea..00e8f86 100644 Binary files a/dev/_images/sphx_glr_plot_UNO_darcy_thumb.png and b/dev/_images/sphx_glr_plot_UNO_darcy_thumb.png differ diff --git a/dev/_images/sphx_glr_plot_count_flops_thumb.png b/dev/_images/sphx_glr_plot_count_flops_thumb.png index b06c4e6..8a5fed5 100644 Binary files a/dev/_images/sphx_glr_plot_count_flops_thumb.png and b/dev/_images/sphx_glr_plot_count_flops_thumb.png differ diff --git a/dev/_images/sphx_glr_plot_darcy_flow_001.png b/dev/_images/sphx_glr_plot_darcy_flow_001.png index d2c951f..00fb270 100644 Binary files a/dev/_images/sphx_glr_plot_darcy_flow_001.png and b/dev/_images/sphx_glr_plot_darcy_flow_001.png differ diff --git a/dev/_images/sphx_glr_plot_darcy_flow_spectrum_001.png b/dev/_images/sphx_glr_plot_darcy_flow_spectrum_001.png index f64ec42..6391301 100644 Binary files a/dev/_images/sphx_glr_plot_darcy_flow_spectrum_001.png and b/dev/_images/sphx_glr_plot_darcy_flow_spectrum_001.png differ diff --git 
a/dev/_images/sphx_glr_plot_incremental_FNO_darcy_001.png b/dev/_images/sphx_glr_plot_incremental_FNO_darcy_001.png index 5ca6b9b..bd21717 100644 Binary files a/dev/_images/sphx_glr_plot_incremental_FNO_darcy_001.png and b/dev/_images/sphx_glr_plot_incremental_FNO_darcy_001.png differ diff --git a/dev/_images/sphx_glr_plot_incremental_FNO_darcy_thumb.png b/dev/_images/sphx_glr_plot_incremental_FNO_darcy_thumb.png index 0376d5d..dfa2561 100644 Binary files a/dev/_images/sphx_glr_plot_incremental_FNO_darcy_thumb.png and b/dev/_images/sphx_glr_plot_incremental_FNO_darcy_thumb.png differ diff --git a/dev/_modules/index.html b/dev/_modules/index.html index 9700acb..849ca00 100644 --- a/dev/_modules/index.html +++ b/dev/_modules/index.html @@ -16,7 +16,7 @@ - + @@ -115,7 +115,6 @@
All modules for which code is available