
Commit

Github action: auto-update.
github-actions[bot] committed Aug 28, 2024
1 parent 9b7705f commit 6d1bca0
Showing 154 changed files with 1,372 additions and 7,249 deletions.
@@ -197,7 +197,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,
@@ -78,7 +78,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,
@@ -1,18 +1,19 @@
"""
Training an FNO on Darcy-Flow
Training a TFNO on Darcy-Flow
=============================
In this example, we demonstrate how to use the small Darcy-Flow example we ship with the package
to train a Fourier-Neural Operator
to train a Tensorized Fourier-Neural Operator
"""

# %%
#


import torch
import matplotlib.pyplot as plt
import sys
from neuralop.models import FNO
from neuralop.models import TFNO
from neuralop import Trainer
from neuralop.training import AdamW
from neuralop.data.datasets import load_darcy_flow_small
@@ -33,13 +34,9 @@


# %%
# We create a simple FNO model
# We create a tensorized FNO model

model = FNO(n_modes=(16, 16),
in_channels=1,
out_channels=1,
hidden_channels=32,
projection_channels=64)
model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)
model = model.to(device)

n_params = count_model_params(model)
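As a side note (not part of the diff): a minimal sketch of how the updated example's TFNO could be instantiated and exercised on dummy Darcy-style input. It assumes the neuralop API exactly as shown above, and that out_channels defaults to 1 when omitted.

import torch
from neuralop.models import TFNO

# Mirror the updated example's constructor call
model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32,
             projection_channels=64, factorization='tucker', rank=0.42)

# Dummy batch at the Darcy-small resolution: (batch, channels, height, width)
x = torch.randn(4, 1, 16, 16)
y = model(x)
print(y.shape)  # expected (4, 1, 16, 16) if out_channels defaults to 1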
@@ -33,14 +33,7 @@
# %%
# We create a tensorized FNO model

model = TFNO(n_modes=(16, 16),
in_channels=1,
out_channels=1,
hidden_channels=32,
projection_channels=64,
factorization='tucker',
rank=0.42)

model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)
model = model.to(device)

n_params = count_model_params(model)
@@ -51,7 +51,7 @@
},
"outputs": [],
"source": [
"model = TFNO(n_modes=(16, 16),\n in_channels=1, \n out_channels=1, \n hidden_channels=32, \n projection_channels=64, \n factorization='tucker', \n rank=0.42)\n\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
"model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
]
},
{
@@ -154,7 +154,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,
@@ -15,7 +15,7 @@
},
"outputs": [],
"source": [
"from copy import deepcopy\nimport torch\nfrom torchtnt.utils.flops import FlopTensorDispatchMode\n\nfrom neuralop.models import FNO\n\ndevice = 'cpu'\n\nfno = FNO(n_modes=(64,64), \n in_channels=1, \n out_channels=1, \n hidden_channels=64, \n projection_channels=64)\n\nbatch_size = 4\nmodel_input = torch.randn(batch_size, 1, 128, 128)\n\n\nwith FlopTensorDispatchMode(fno) as ftdm:\n # count forward flops\n res = fno(model_input).mean()\n fno_forward_flops = deepcopy(ftdm.flop_counts)\n \n ftdm.reset()\n res.backward()\n fno_backward_flops = deepcopy(ftdm.flop_counts)"
"from copy import deepcopy\nimport torch\nfrom torchtnt.utils.flops import FlopTensorDispatchMode\n\nfrom neuralop.models import FNO\n\ndevice = 'cpu'\n\nfno = FNO(n_modes=(64,64), \n in_channels=3, \n out_channels=1, \n hidden_channels=64, \n projection_channels=64)\n\nbatch_size = 4\nmodel_input = torch.randn(batch_size, 3, 128, 128)\n\n\nwith FlopTensorDispatchMode(fno) as ftdm:\n # count forward flops\n res = fno(model_input).mean()\n fno_forward_flops = deepcopy(ftdm.flop_counts)\n \n ftdm.reset()\n res.backward()\n fno_backward_flops = deepcopy(ftdm.flop_counts)"
]
},
{
@@ -71,7 +71,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,

This file was deleted.

@@ -51,7 +51,7 @@
},
"outputs": [],
"source": [
"model = SFNO(n_modes=(32, 32),\n in_channels=3,\n out_channels=3,\n hidden_channels=32,\n projection_channels=64,\n factorization='dense')\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
"model = SFNO(n_modes=(32, 32), in_channels=3, out_channels=3, hidden_channels=32, projection_channels=64, factorization='dense')\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
]
},
{
@@ -172,7 +172,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,

This file was deleted.

@@ -31,12 +31,7 @@
# %%
# We create a tensorized FNO model

model = SFNO(n_modes=(32, 32),
in_channels=3,
out_channels=3,
hidden_channels=32,
projection_channels=64,
factorization='dense')
model = SFNO(n_modes=(32, 32), in_channels=3, out_channels=3, hidden_channels=32, projection_channels=64, factorization='dense')
model = model.to(device)

n_params = count_model_params(model)
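A minimal sketch (an addition, not from the diff) of exercising the SFNO constructed above on a dummy spherical batch. The (32, 64) latitude-longitude grid is an assumption based on the SWE example's small test resolutions; the constructor call mirrors the diff.

import torch
from neuralop.models import SFNO

model = SFNO(n_modes=(32, 32), in_channels=3, out_channels=3,
             hidden_channels=32, projection_channels=64, factorization='dense')

# Dummy shallow-water-style batch: (batch, channels, n_latitudes, n_longitudes)
u = torch.randn(4, 3, 32, 64)
out = model(u)
print(out.shape)  # expected (4, 3, 32, 64)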
@@ -107,7 +107,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,
@@ -19,13 +19,13 @@
device = 'cpu'

fno = FNO(n_modes=(64,64),
in_channels=1,
in_channels=3,
out_channels=1,
hidden_channels=64,
projection_channels=64)

batch_size = 4
model_input = torch.randn(batch_size, 1, 128, 128)
model_input = torch.randn(batch_size, 3, 128, 128)


with FlopTensorDispatchMode(fno) as ftdm:
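A small follow-up sketch (an assumption, not part of the diff) for turning the counts captured above into totals. It continues from the fno_forward_flops and fno_backward_flops variables in the snippet, presumes torchtnt's flop_counts maps module names to per-operation FLOP dictionaries with the whole model aggregated under the empty-string key, and total_flops is a hypothetical helper rather than a torchtnt API.

def total_flops(flop_counts):
    # flop_counts is assumed to map module names to {op_name: flop_count};
    # the "" key is assumed to aggregate the full model.
    return sum(flop_counts[""].values())

print(f"forward FLOPs:  {total_flops(fno_forward_flops):,}")
print(f"backward FLOPs: {total_flops(fno_backward_flops):,}")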
@@ -4,7 +4,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"\n# Training an FNO on Darcy-Flow\n\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package\nto train a Fourier-Neural Operator\n"
"\n# Training a TFNO on Darcy-Flow\n\nIn this example, we demonstrate how to use the small Darcy-Flow example we ship with the package\nto train a Tensorized Fourier-Neural Operator\n"
]
},
{
@@ -15,7 +15,7 @@
},
"outputs": [],
"source": [
"import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import FNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'"
"import torch\nimport matplotlib.pyplot as plt\nimport sys\nfrom neuralop.models import TFNO\nfrom neuralop import Trainer\nfrom neuralop.training import AdamW\nfrom neuralop.data.datasets import load_darcy_flow_small\nfrom neuralop.utils import count_model_params\nfrom neuralop import LpLoss, H1Loss\n\ndevice = 'cpu'"
]
},
{
@@ -40,7 +40,7 @@
"cell_type": "markdown",
"metadata": {},
"source": [
"We create a simple FNO model\n\n"
"We create a tensorized FNO model\n\n"
]
},
{
@@ -51,7 +51,7 @@
},
"outputs": [],
"source": [
"model = FNO(n_modes=(16, 16),\n in_channels=1, \n out_channels=1,\n hidden_channels=32, \n projection_channels=64)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
"model = TFNO(n_modes=(16, 16), in_channels=1, hidden_channels=32, projection_channels=64, factorization='tucker', rank=0.42)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
]
},
{
@@ -172,7 +172,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,
@@ -33,7 +33,7 @@
},
"outputs": [],
"source": [
"train_loader, test_loaders, data_processor = load_darcy_flow_small(\n n_train=1000, batch_size=32, \n test_resolutions=[16, 32], n_tests=[100, 50],\n test_batch_sizes=[32, 32],\n)\n\nmodel = UNO(in_channels=1, \n out_channels=1, \n hidden_channels=64, \n projection_channels=64,\n uno_out_channels=[32,64,64,64,32],\n uno_n_modes=[[16,16],[8,8],[8,8],[8,8],[16,16]],\n uno_scalings=[[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]],\n horizontal_skips_map=None,\n channel_mlp_skip=\"linear\",\n n_layers = 5,\n domain_padding=0.2)\n\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
"train_loader, test_loaders, data_processor = load_darcy_flow_small(\n n_train=1000, batch_size=32, \n test_resolutions=[16, 32], n_tests=[100, 50],\n test_batch_sizes=[32, 32],\n)\n\n\n\nmodel = UNO(in_channels=1, out_channels=1, hidden_channels=64, projection_channels=64,uno_out_channels = [32,64,64,64,32], \\\n uno_n_modes= [[16,16],[8,8],[8,8],[8,8],[16,16]], uno_scalings= [[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]],\\\n horizontal_skips_map = None, n_layers = 5, domain_padding = 0.2)\nmodel = model.to(device)\n\nn_params = count_model_params(model)\nprint(f'\\nOur model has {n_params} parameters.')\nsys.stdout.flush()"
]
},
{
@@ -154,7 +154,7 @@
"name": "python",
"nbconvert_exporter": "python",
"pygments_lexer": "ipython3",
"version": "3.10.14"
"version": "3.9.19"
}
},
"nbformat": 4,
15 changes: 4 additions & 11 deletions dev/_downloads/f9c1e50d48828a746410d5400feb61f7/plot_UNO_darcy.py
@@ -31,18 +31,11 @@
test_batch_sizes=[32, 32],
)

model = UNO(in_channels=1,
out_channels=1,
hidden_channels=64,
projection_channels=64,
uno_out_channels=[32,64,64,64,32],
uno_n_modes=[[16,16],[8,8],[8,8],[8,8],[16,16]],
uno_scalings=[[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]],
horizontal_skips_map=None,
channel_mlp_skip="linear",
n_layers = 5,
domain_padding=0.2)


model = UNO(in_channels=1, out_channels=1, hidden_channels=64, projection_channels=64,uno_out_channels = [32,64,64,64,32], \
uno_n_modes= [[16,16],[8,8],[8,8],[8,8],[16,16]], uno_scalings= [[1.0,1.0],[0.5,0.5],[1,1],[2,2],[1,1]],\
horizontal_skips_map = None, n_layers = 5, domain_padding = 0.2)
model = model.to(device)

n_params = count_model_params(model)
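For readability, a sketch (not part of the diff) that spells out the updated UNO call with one keyword argument per line and runs a dummy forward pass. The 16x16 input resolution is an assumption matching the Darcy-small loader above; since the uno_scalings multiply out to 1, the output spatial size should match the input.

import torch
from neuralop.models import UNO

model = UNO(in_channels=1, out_channels=1, hidden_channels=64,
            projection_channels=64,
            uno_out_channels=[32, 64, 64, 64, 32],
            uno_n_modes=[[16, 16], [8, 8], [8, 8], [8, 8], [16, 16]],
            uno_scalings=[[1.0, 1.0], [0.5, 0.5], [1, 1], [2, 2], [1, 1]],
            horizontal_skips_map=None, n_layers=5, domain_padding=0.2)

# Dummy Darcy-style batch: (batch, channels, height, width)
x = torch.randn(4, 1, 16, 16)
out = model(x)
print(out.shape)  # expected (4, 1, 16, 16) given the scalings above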
Binary file modified dev/_images/sphx_glr_plot_FNO_darcy_001.png
Binary file modified dev/_images/sphx_glr_plot_FNO_darcy_thumb.png
Binary file modified dev/_images/sphx_glr_plot_SFNO_swe_001.png
Binary file modified dev/_images/sphx_glr_plot_SFNO_swe_thumb.png
Binary file modified dev/_images/sphx_glr_plot_UNO_darcy_001.png
Binary file modified dev/_images/sphx_glr_plot_UNO_darcy_thumb.png
Binary file modified dev/_images/sphx_glr_plot_count_flops_thumb.png
Binary file modified dev/_images/sphx_glr_plot_darcy_flow_001.png
Binary file modified dev/_images/sphx_glr_plot_darcy_flow_spectrum_001.png
Binary file modified dev/_images/sphx_glr_plot_incremental_FNO_darcy_001.png
Binary file modified dev/_images/sphx_glr_plot_incremental_FNO_darcy_thumb.png
3 changes: 1 addition & 2 deletions dev/_modules/index.html
@@ -16,7 +16,7 @@

<link rel="stylesheet" type="text/css" href="../_static/pygments.css?v=a746c00c" />
<link rel="stylesheet" type="text/css" href="../_static/tensorly_style.css?v=a02e9698" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery.css?v=61a4c737" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery.css?v=d2d258e8" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery-binder.css?v=f4aeca0c" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery-dataframe.css?v=2082cf3c" />
<link rel="stylesheet" type="text/css" href="../_static/sg_gallery-rendered-html.css?v=1277b6f3" />
@@ -115,7 +115,6 @@
<h1>All modules for which code is available</h1>
<ul><li><a href="neuralop/data/datasets/darcy.html">neuralop.data.datasets.darcy</a></li>
<li><a href="neuralop/data/transforms/data_processors.html">neuralop.data.transforms.data_processors</a></li>
<li><a href="neuralop/layers/embeddings.html">neuralop.layers.embeddings</a></li>
<li><a href="neuralop/layers/integral_transform.html">neuralop.layers.integral_transform</a></li>
<li><a href="neuralop/layers/neighbor_search.html">neuralop.layers.neighbor_search</a></li>
<li><a href="neuralop/layers/padding.html">neuralop.layers.padding</a></li>
2 changes: 1 addition & 1 deletion dev/_modules/neuralop/data/datasets/darcy.html
@@ -16,7 +16,7 @@

<link rel="stylesheet" type="text/css" href="../../../../_static/pygments.css?v=a746c00c" />
<link rel="stylesheet" type="text/css" href="../../../../_static/tensorly_style.css?v=a02e9698" />
<link rel="stylesheet" type="text/css" href="../../../../_static/sg_gallery.css?v=61a4c737" />
<link rel="stylesheet" type="text/css" href="../../../../_static/sg_gallery.css?v=d2d258e8" />
<link rel="stylesheet" type="text/css" href="../../../../_static/sg_gallery-binder.css?v=f4aeca0c" />
<link rel="stylesheet" type="text/css" href="../../../../_static/sg_gallery-dataframe.css?v=2082cf3c" />
<link rel="stylesheet" type="text/css" href="../../../../_static/sg_gallery-rendered-html.css?v=1277b6f3" />
