diff --git a/README.md b/README.md
index f8eeed013..f3ff67d8c 100644
--- a/README.md
+++ b/README.md
@@ -22,8 +22,7 @@
 ## About 🔎
 
-Minerva is a package to aid in the building, fitting and testing of neural network models on geo-spatial
-rasterised land cover data.
+Minerva is a package to aid in the building, fitting and testing of neural network models on multi-spectral geo-spatial data.
 
 ## Getting Started ▶
 
@@ -122,11 +121,19 @@ Contributions also provided by:
 - [Isabel Sargent](https://github.com/PenguinJunk)
 - [Steve Coupland](https://github.com/scoupland-os)
 - [Joe Guyatt](https://github.com/joeguyatt97)
+- [Ben Dickens](https://github.com/BenDickens)
+- [Kitty Varghese](https://github.com/kittyvarghese)
 
 ## Acknowledgments 📢
 
-I'd like to acknowledge the invaluable supervision and contributions of Prof Jonathon Hare and
-Dr Isabel Sargent towards this work.
+I'd like to acknowledge the invaluable supervision and contributions of [Prof Jonathon Hare](https://github.com/jonhare) and
+[Dr Isabel Sargent](https://github.com/PenguinJunk) towards this work.
+
+The following modules are adapted from open-source third parties:
+| Module | Original Author | License | Link |
+|:-------|:----------------|:--------|:-----|
+| `pytorchtools` | [Bjarte Mehus Sunde](https://github.com/Bjarten) | MIT | https://github.com/Bjarten/early-stopping-pytorch |
+| `optimisers` | [Noah Golmant](https://github.com/noahgolmant) | MIT | https://github.com/noahgolmant/pytorch-lars/blob/master/lars.py |
 
 This repository also contains two small ``.tiff`` excerpts from the [ChesapeakeCVPR](https://lila.science/datasets/chesapeakelandcover) dataset used for unit testing purposes.
 Credit for this data goes to:
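For context on the table above: `pytorchtools` is the early-stopping utility adapted from Bjarte Mehus Sunde's `early-stopping-pytorch`, and `optimisers` adapts Noah Golmant's LARS optimiser. A minimal sketch of how the early-stopping module might be used in a validation loop, assuming the vendored copy keeps the upstream interface — the `minerva.pytorchtools` import path and the stand-in model are illustrative, not confirmed by this diff:

```python
import torch.nn as nn

# Assumed import path for the vendored module; not confirmed by this diff.
from minerva.pytorchtools import EarlyStopping

model = nn.Linear(4, 2)  # stand-in model, purely for illustration
early_stopping = EarlyStopping(patience=7, verbose=True)

for epoch in range(100):
    # ... training step omitted ...
    val_loss = 1.0 / (epoch + 1)  # dummy validation loss for the sketch
    early_stopping(val_loss, model)  # checkpoints on improvement, else counts down patience
    if early_stopping.early_stop:
        print("Early stopping triggered")
        break
```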
""" - z = self.model(*input) + z = self.model(*inputs) assert isinstance(z, tuple) and list(map(type, z)) == [Tensor] * len(z) return z - def __call__(self, *input) -> Tuple[Tensor, ...]: - return self.forward(*input) + def __call__(self, *inputs) -> Tuple[Tensor, ...]: + return self.forward(*inputs) def __getattr__(self, name): try: @@ -339,8 +339,8 @@ def __init__(self, model: Module, *args, **kwargs) -> None: self.model = model - def __call__(self, *input) -> Any: - return self.model.forward(*input) + def __call__(self, *inputs) -> Any: + return self.model.forward(*inputs) def __getattr__(self, name) -> Any: try: @@ -351,16 +351,16 @@ def __getattr__(self, name) -> Any: def __repr__(self) -> Any: return self.model.__repr__() - def forward(self, *input: Any) -> Any: + def forward(self, *inputs: Any) -> Any: """Performs a forward pass of the :attr:`~MinervaOnnxModel.model` within. Args: - input (~typing.Any): Input to be parsed to the ``.forward`` method of :attr:`~MinervaOnnxModel.model`. + inputs (~typing.Any): Input to be parsed to the ``.forward`` method of :attr:`~MinervaOnnxModel.model`. Returns: ~typing.Any: Output of :attr:`~MinervaOnnxModel.model`. """ - return self.model.forward(*input) + return self.model.forward(*inputs) # ===================================================================================================================== diff --git a/minerva/trainer.py b/minerva/trainer.py index 18843ed5f..d08a39d57 100644 --- a/minerva/trainer.py +++ b/minerva/trainer.py @@ -1267,13 +1267,13 @@ def save_model_weights(self, fn: Optional[str] = None) -> None: torch.save(model.state_dict(), f"{fn}.pt") def save_model( - self, fn: Optional[Union[Path, str]] = None, format: str = "pt" + self, fn: Optional[Union[Path, str]] = None, fmt: str = "pt" ) -> None: """Saves the model object itself to :mod:`torch` file. Args: fn (~pathlib.Path | str): Optional; Filename and path (excluding extension) to save model to. - format (str): Optional; Format to save model to. ``pt`` for :mod:`torch`, or :mod:`onnx` for ONNX. + fmt (str): Optional; Format to save model to. ``pt`` for :mod:`torch`, or :mod:`onnx` for ONNX. Raises: ValueError: If format is not recognised. 
diff --git a/minerva/trainer.py b/minerva/trainer.py
index 18843ed5f..d08a39d57 100644
--- a/minerva/trainer.py
+++ b/minerva/trainer.py
@@ -1267,13 +1267,13 @@ def save_model_weights(self, fn: Optional[str] = None) -> None:
         torch.save(model.state_dict(), f"{fn}.pt")
 
     def save_model(
-        self, fn: Optional[Union[Path, str]] = None, format: str = "pt"
+        self, fn: Optional[Union[Path, str]] = None, fmt: str = "pt"
     ) -> None:
         """Saves the model object itself to :mod:`torch` file.
 
         Args:
             fn (~pathlib.Path | str): Optional; Filename and path (excluding extension) to save model to.
-            format (str): Optional; Format to save model to. ``pt`` for :mod:`torch`, or :mod:`onnx` for ONNX.
+            fmt (str): Optional; Format to save model to. ``pt`` for :mod:`torch`, or ``onnx`` for ONNX.
 
         Raises:
             ValueError: If format is not recognised.
@@ -1283,13 +1283,13 @@ def save_model(
         if fn is None:
             fn = str(self.exp_fn)
 
-        if format == "pt":
+        if fmt == "pt":
             torch.save(model, f"{fn}.pt")
-        elif format == "onnx":
+        elif fmt == "onnx":
             x = torch.rand(*self.get_input_size(), device=self.device)
             torch.onnx.export(model, (x,), f"{fn}.onnx")
         else:
-            raise ValueError(f"format {format} unrecognised!")
+            raise ValueError(f"format {fmt} unrecognised!")
 
     def save_backbone(self) -> None:
         """Readies the model for use in downstream tasks and saves to file."""
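At a call site, the renamed keyword reads as below. This is a hedged sketch: `trainer` stands in for an already-constructed minerva trainer object, whose setup this diff does not show, and the output paths are illustrative:

```python
# `trainer` is assumed to be an already-configured minerva trainer instance.
trainer.save_model(fn="results/exp1", fmt="pt")    # writes results/exp1.pt via torch.save
trainer.save_model(fn="results/exp1", fmt="onnx")  # traces a random input, writes results/exp1.onnx
```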
diff --git a/notebooks/Torchgeo_FCN_Demo.ipynb b/notebooks/Torchgeo_FCN_Demo.ipynb
new file mode 100644
index 000000000..12ead8c03
--- /dev/null
+++ b/notebooks/Torchgeo_FCN_Demo.ipynb
@@ -0,0 +1,194 @@
+{
+ "cells": [
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "import os\n",
+    "import tempfile\n",
+    "from pathlib import Path\n",
+    "\n",
+    "from torch.utils.data import DataLoader\n",
+    "from torchvision.models.segmentation import fcn_resnet50\n",
+    "import torch.nn as nn\n",
+    "from torchgeo.datasets import NAIP, ChesapeakeDE, stack_samples\n",
+    "from torchgeo.datasets.utils import download_url\n",
+    "from torchgeo.samplers import RandomGeoSampler\n",
+    "from torch.nn import CrossEntropyLoss\n",
+    "from torch.optim import Adam\n",
+    "import torch\n",
+    "import numpy as np\n",
+    "import matplotlib.pyplot as plt"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "from minerva.models import FCN8ResNet18\n",
+    "from minerva.utils.utils import get_cuda_device\n",
+    "\n",
+    "device = get_cuda_device(0)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "data_root = tempfile.gettempdir()\n",
+    "train_root = Path(data_root, \"naip\", \"train\")\n",
+    "test_root = Path(data_root, \"naip\", \"test\")\n",
+    "naip_url = \"https://naipeuwest.blob.core.windows.net/naip/v002/de/2018/de_060cm_2018/38075/\"\n",
+    "tiles = [\n",
+    "    \"m_3807511_ne_18_060_20181104.tif\",\n",
+    "    \"m_3807511_se_18_060_20181104.tif\",\n",
+    "    \"m_3807512_nw_18_060_20180815.tif\",\n",
+    "]\n",
+    "\n",
+    "for tile in tiles:\n",
+    "    download_url(naip_url + tile, train_root)\n",
+    "\n",
+    "download_url(naip_url + \"m_3807512_sw_18_060_20180815.tif\", test_root)"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "train_naip = NAIP(train_root)\n",
+    "test_naip = NAIP(test_root)\n",
+    "\n",
+    "chesapeake_root = os.path.join(data_root, \"chesapeake\")\n",
+    "\n",
+    "chesapeake = ChesapeakeDE(chesapeake_root, crs=train_naip.crs, res=train_naip.res, download=True)\n",
+    "\n",
+    "train_dataset = train_naip & chesapeake\n",
+    "test_dataset = test_naip & chesapeake\n",
+    "\n",
+    "sampler = RandomGeoSampler(train_naip, size=256, length=200)\n",
+    "dataloader = DataLoader(train_dataset, sampler=sampler, collate_fn=stack_samples, batch_size=32)\n",
+    "\n",
+    "testsampler = RandomGeoSampler(test_naip, size=256, length=8)\n",
+    "testdataloader = DataLoader(test_dataset, sampler=testsampler, collate_fn=stack_samples, batch_size=8, num_workers=4)\n",
+    "testdata = next(iter(testdataloader))"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "crit = CrossEntropyLoss()\n",
+    "\n",
+    "# The loss criterion is normally passed to a model at init in minerva.\n",
+    "fcn = FCN8ResNet18(crit, input_size=(4, 256, 256), n_classes=13).to(device)\n",
+    "opt = Adam(fcn.parameters(), lr=1e-3)\n",
+    "\n",
+    "# Optimisers need to be set on a model in minerva before training.\n",
+    "fcn.set_optimiser(opt)\n",
+    "\n",
+    "for epoch in range(101):\n",
+    "    losses = []\n",
+    "    for i, sample in enumerate(dataloader):\n",
+    "        image = sample[\"image\"].to(device).float() / 255.0\n",
+    "        target = sample[\"mask\"].to(device).long().squeeze(1)\n",
+    "        \n",
+    "        # Uses MinervaModel.step.\n",
+    "        loss, pred = fcn.step(image, target, train=True)\n",
+    "        losses.append(loss.item())\n",
+    "\n",
+    "    print(epoch, np.mean(losses))\n",
+    "    if epoch % 10 == 0:\n",
+    "        with torch.no_grad():\n",
+    "            image = testdata[\"image\"].to(device).float() / 255.0\n",
+    "            target = testdata[\"mask\"].to(device).long().squeeze(1)\n",
+    "            pred = fcn(image)\n",
+    "\n",
+    "        fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\n",
+    "        for i in range(pred.shape[0]):\n",
+    "            axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\n",
+    "            axs[1,i].imshow(target[i].cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n",
+    "            axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n",
+    "        plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n",
+    "        plt.show()"
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "fcn = fcn_resnet50(num_classes=13).to(device)\n",
+    "fcn.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(device)\n",
+    "\n",
+    "crit = CrossEntropyLoss()\n",
+    "opt = Adam(fcn.parameters(), lr=1e-3)\n",
+    "\n",
+    "for epoch in range(101):\n",
+    "    losses = []\n",
+    "    for i, sample in enumerate(dataloader):\n",
+    "        image = sample[\"image\"].to(device).float() / 255.0\n",
+    "        target = sample[\"mask\"].to(device).long().squeeze(1)\n",
+    "\n",
+    "        opt.zero_grad()\n",
+    "        pred = fcn(image)[\"out\"]\n",
+    "        loss = crit(pred, target)\n",
+    "        loss.backward()\n",
+    "        opt.step()\n",
+    "        losses.append(loss.item())\n",
+    "\n",
+    "    print(epoch, np.mean(losses))\n",
+    "    if epoch % 10 == 0:\n",
+    "        with torch.no_grad():\n",
+    "            image = testdata[\"image\"].to(device).float() / 255.0\n",
+    "            target = testdata[\"mask\"].to(device).long().squeeze(1)\n",
+    "            pred = fcn(image)[\"out\"]\n",
+    "\n",
+    "        fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\n",
+    "        for i in range(pred.shape[0]):\n",
+    "            axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\n",
+    "            axs[1,i].imshow(target[i].cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n",
+    "            axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n",
+    "        plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n",
+    "        plt.show()"
+   ]
+  }
+ ],
+ "metadata": {
+  "kernelspec": {
+   "display_name": "minerva-310",
+   "language": "python",
+   "name": "python3"
+  },
+  "language_info": {
+   "codemirror_mode": {
+    "name": "ipython",
+    "version": 3
+   },
+   "file_extension": ".py",
+   "mimetype": "text/x-python",
+   "name": "python",
+   "nbconvert_exporter": "python",
+   "pygments_lexer": "ipython3",
+   "version": "3.10.9 (main, Jan 11 2023, 15:21:40) [GCC 11.2.0]"
+  },
+  "orig_nbformat": 4,
+  "vscode": {
+   "interpreter": {
+    "hash": "3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff"
+   }
+  }
+ },
+ "nbformat": 4,
+ "nbformat_minor": 2
+}
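The notebook above only prints the training loss. As a quick evaluation aid, a sketch of overall pixel accuracy on the held-out batch, reusing the `fcn`, `testdata`, and `device` names defined in the notebook (prediction shape `[B, 13, 256, 256]`, mask shape `[B, 256, 256]`):

```python
import torch

# Assumes `fcn`, `testdata` and `device` from the notebook above.
with torch.no_grad():
    image = testdata["image"].to(device).float() / 255.0
    target = testdata["mask"].to(device).long().squeeze(1)
    pred = fcn(image).argmax(dim=1)  # [B, 256, 256] class indices

accuracy = (pred == target).float().mean().item()
print(f"Pixel accuracy on this batch: {accuracy:.3f}")
```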
"cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import tempfile\n", + "from pathlib import Path\n", + "\n", + "from torch.utils.data import DataLoader\n", + "from torchvision.models.segmentation import fcn_resnet50\n", + "import torch.nn as nn\n", + "from torchgeo.datasets import ChesapeakeCVPR, stack_samples\n", + "from torchgeo.samplers import RandomGeoSampler\n", + "from torch.nn import CrossEntropyLoss\n", + "from torch.optim import Adam\n", + "import torch\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from minerva.models import FCN8ResNet18\n", + "from minerva.utils.utils import get_cuda_device\n", + "\n", + "device = get_cuda_device(0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data_root = Path(tempfile.gettempdir())\n", + "train_root = data_root / \"train\"\n", + "test_root = data_root / \"test\"" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "train_dataset = ChesapeakeCVPR(str(train_root), splits=[\"de-train\", \"ny-train\", \"wv-train\"], layers=[\"naip-new\", \"naip-old\", \"lc\"], download=True)\n", + "test_dataset = ChesapeakeCVPR(str(test_root), splits=[\"md-test\", \"va-test\"], layers=[\"naip-new\", \"naip-old\", \"lc\"], download=True)\n", + "\n", + "sampler = RandomGeoSampler(train_dataset, size=256, length=200)\n", + "dataloader = DataLoader(train_dataset, sampler=sampler, collate_fn=stack_samples, batch_size=32)\n", + "\n", + "testsampler = RandomGeoSampler(test_dataset, size=256, length=8)\n", + "testdataloader = DataLoader(test_dataset, sampler=testsampler, collate_fn=stack_samples, batch_size=8, num_workers=4)\n", + "testdata = list(testdataloader)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "crit = CrossEntropyLoss()\n", + "\n", + "# Criterions are normally parsed to models at init in minerva.\n", + "fcn = FCN8ResNet18(crit, input_size=(4, 256, 256), n_classes=13).to(device)\n", + "opt = Adam(fcn.parameters(), lr=1e-3)\n", + "\n", + "# Optimisers need to be set to a model in minerva before training.\n", + "fcn.set_optimiser(opt)\n", + "\n", + "for epoch in range(101):\n", + " losses = []\n", + " for i, sample in enumerate(dataloader):\n", + " image = sample[\"image\"].to(device).float() / 255.0\n", + " target = sample[\"mask\"].to(device).long().squeeze(1)\n", + " \n", + " # Uses MinervaModel.step.\n", + " loss, pred = fcn.step(image, target, train=True)\n", + " losses.append(loss.item())\n", + "\n", + " print(epoch, np.mean(losses))\n", + " if epoch % 10 == 0:\n", + " with torch.no_grad():\n", + " image = testdata[\"image\"].to(device).float() / 255.0\n", + " target = testdata[\"mask\"].to(device).long().squeeze(1)\n", + " pred = fcn(image)\n", + "\n", + " fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\n", + " for i in range(pred.shape[0]):\n", + " axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\n", + " axs[1,i].imshow(target[i].cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": 
[], + "source": [ + "fcn = fcn_resnet50(num_classes=13).to(device)\n", + "fcn.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(device)\n", + "\n", + "crit = CrossEntropyLoss()\n", + "opt = Adam(fcn.parameters(), lr=1e-3)\n", + "\n", + "for epoch in range(101):\n", + " losses = []\n", + " for i, sample in enumerate(dataloader):\n", + " image = sample[\"image\"].to(device).float() / 255.0\n", + " target = sample[\"mask\"].to(device).long().squeeze(1)\n", + "\n", + " opt.zero_grad()\n", + " pred = fcn(image)[\"out\"]\n", + " loss = crit(pred, target)\n", + " loss.backward()\n", + " opt.step()\n", + " losses.append(loss.item())\n", + "\n", + " print(epoch, np.mean(losses))\n", + " if epoch % 10 == 0:\n", + " with torch.no_grad():\n", + " image = testdata[\"image\"].to(device).float() / 255.0\n", + " target = testdata[\"mask\"].to(device).long().squeeze(1)\n", + " pred = fcn(image)[\"out\"]\n", + "\n", + " fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\n", + " for i in range(pred.shape[0]):\n", + " axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\n", + " axs[1,i].imshow(target[i].cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n", + " plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "minerva-310", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/notebooks/Torchgeo_FCN_Small.ipynb b/notebooks/Torchgeo_FCN_Small.ipynb new file mode 100644 index 000000000..12ead8c03 --- /dev/null +++ b/notebooks/Torchgeo_FCN_Small.ipynb @@ -0,0 +1,194 @@ +{ + "cells": [ + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import os\n", + "import tempfile\n", + "from pathlib import Path\n", + "\n", + "from torch.utils.data import DataLoader\n", + "from torchvision.models.segmentation import fcn_resnet50\n", + "import torch.nn as nn\n", + "from torchgeo.datasets import NAIP, ChesapeakeDE, stack_samples\n", + "from torchgeo.datasets.utils import download_url\n", + "from torchgeo.samplers import RandomGeoSampler\n", + "from torch.nn import CrossEntropyLoss\n", + "from torch.optim import Adam\n", + "import torch\n", + "import numpy as np\n", + "import matplotlib.pyplot as plt" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from minerva.models import FCN8ResNet18\n", + "from minerva.utils.utils import get_cuda_device\n", + "\n", + "device = get_cuda_device(0)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "data_root = tempfile.gettempdir()\n", + "train_root = Path(data_root, \"naip\", \"train\")\n", + "test_root = Path(data_root, \"naip\", \"test\")\n", + "naip_url = \"https://naipeuwest.blob.core.windows.net/naip/v002/de/2018/de_060cm_2018/38075/\"\n", + "tiles = [\n", + " \"m_3807511_ne_18_060_20181104.tif\",\n", + " 
\"m_3807511_se_18_060_20181104.tif\",\n", + " \"m_3807512_nw_18_060_20180815.tif\",\n", + "]\n", + "\n", + "for tile in tiles:\n", + " download_url(naip_url + tile, train_root)\n", + "\n", + "download_url(naip_url + \"m_3807512_sw_18_060_20180815.tif\", test_root)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "train_naip = NAIP(train_root)\n", + "test_naip = NAIP(test_root)\n", + "\n", + "chesapeake_root = os.path.join(data_root, \"chesapeake\")\n", + "\n", + "chesapeake = ChesapeakeDE(chesapeake_root, crs=train_naip.crs, res=train_naip.res, download=True)\n", + "\n", + "train_dataset = train_naip & chesapeake\n", + "test_dataset = test_naip & chesapeake\n", + "\n", + "sampler = RandomGeoSampler(train_naip, size=256, length=200)\n", + "dataloader = DataLoader(train_dataset, sampler=sampler, collate_fn=stack_samples, batch_size=32)\n", + "\n", + "testsampler = RandomGeoSampler(test_naip, size=256, length=8)\n", + "testdataloader = DataLoader(test_dataset, sampler=testsampler, collate_fn=stack_samples, batch_size=8, num_workers=4)\n", + "testdata = list(testdataloader)[0]" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "crit = CrossEntropyLoss()\n", + "\n", + "# Criterions are normally parsed to models at init in minerva.\n", + "fcn = FCN8ResNet18(crit, input_size=(4, 256, 256), n_classes=13).to(device)\n", + "opt = Adam(fcn.parameters(), lr=1e-3)\n", + "\n", + "# Optimisers need to be set to a model in minerva before training.\n", + "fcn.set_optimiser(opt)\n", + "\n", + "for epoch in range(101):\n", + " losses = []\n", + " for i, sample in enumerate(dataloader):\n", + " image = sample[\"image\"].to(device).float() / 255.0\n", + " target = sample[\"mask\"].to(device).long().squeeze(1)\n", + " \n", + " # Uses MinervaModel.step.\n", + " loss, pred = fcn.step(image, target, train=True)\n", + " losses.append(loss.item())\n", + "\n", + " print(epoch, np.mean(losses))\n", + " if epoch % 10 == 0:\n", + " with torch.no_grad():\n", + " image = testdata[\"image\"].to(device).float() / 255.0\n", + " target = testdata[\"mask\"].to(device).long().squeeze(1)\n", + " pred = fcn(image)\n", + "\n", + " fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\n", + " for i in range(pred.shape[0]):\n", + " axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\n", + " axs[1,i].imshow(target[i].cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n", + " plt.show()" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "fcn = fcn_resnet50(num_classes=13).to(device)\n", + "fcn.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(device)\n", + "\n", + "crit = CrossEntropyLoss()\n", + "opt = Adam(fcn.parameters(), lr=1e-3)\n", + "\n", + "for epoch in range(101):\n", + " losses = []\n", + " for i, sample in enumerate(dataloader):\n", + " image = sample[\"image\"].to(device).float() / 255.0\n", + " target = sample[\"mask\"].to(device).long().squeeze(1)\n", + "\n", + " opt.zero_grad()\n", + " pred = fcn(image)[\"out\"]\n", + " loss = crit(pred, target)\n", + " loss.backward()\n", + " opt.step()\n", + " losses.append(loss.item())\n", + "\n", + " print(epoch, np.mean(losses))\n", + " if epoch % 10 == 0:\n", + " with 
torch.no_grad():\n", + " image = testdata[\"image\"].to(device).float() / 255.0\n", + " target = testdata[\"mask\"].to(device).long().squeeze(1)\n", + " pred = fcn(image)[\"out\"]\n", + "\n", + " fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\n", + " for i in range(pred.shape[0]):\n", + " axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\n", + " axs[1,i].imshow(target[i].cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\"Set3\", vmin=0, vmax=12)\n", + " plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\n", + " plt.show()" + ] + } + ], + "metadata": { + "kernelspec": { + "display_name": "minerva-310", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.9 (main, Jan 11 2023, 15:21:40) [GCC 11.2.0]" + }, + "orig_nbformat": 4, + "vscode": { + "interpreter": { + "hash": "3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff" + } + } + }, + "nbformat": 4, + "nbformat_minor": 2 +} diff --git a/pyproject.toml b/pyproject.toml index 97c6e98dd..21bd98ab4 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -18,7 +18,7 @@ classifiers = [ "Programming Language :: Python :: 3.9", "Programming Language :: Python :: 3.8", "Programming Language :: Python :: 3 :: Only", - "License :: OSI Approved :: GNU Lesser General Public License v3 (GPLv3)", + "License :: OSI Approved :: MIT License", "Development Status :: 4 - Beta", "Operating System :: POSIX :: Linux", "Natural Language :: English", diff --git a/qodana.sarif.json b/qodana.sarif.json index ac589216d..2f264faa8 100644 --- a/qodana.sarif.json +++ b/qodana.sarif.json @@ -1580,13 +1580,13 @@ ] }, { - "id": "PyDefaultArgumentInspection", + "id": "PyMissingConstructorInspection", "shortDescription": { - "text": "The default argument is mutable" + "text": "Missed call to '__init__' of the super class" }, "fullDescription": { - "text": "Reports a problem when a mutable value as a list or dictionary is detected in a default value for an argument. Default argument values are evaluated only once at function definition time, which means that modifying the default value of the argument will affect all subsequent calls of that function. Example: def func(s, cache={}):\n cache[s] = None\n When the quick-fix is applied, the code changes to: def func(s, cache=None):\n if cache is None:\n cache = {}\n cache[s] = None", - "markdown": "Reports a problem when a mutable value as a list or dictionary is detected in a default value for\nan argument. \n\nDefault argument values are evaluated only once at function definition time,\nwhich means that modifying the\ndefault value of the argument will affect all subsequent calls of that function.\n\n**Example:**\n\n```\ndef func(s, cache={}):\n cache[s] = None\n```\n\nWhen the quick-fix is applied, the code changes to:\n\n```\ndef func(s, cache=None):\n if cache is None:\n cache = {}\n cache[s] = None\n```" + "text": "Reports cases when a call to the 'super' constructor in a class is missed. Example: class Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n pass\n The 'Pear' class should have a 'super' call in the '__init__' method. 
When the quick-fix is applied, the code changes to: class Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n super().__init__()", + "markdown": "Reports cases when a call to the `super` constructor in a class is missed.\n\n**Example:**\n\n```\nclass Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n pass\n```\n\nThe `Pear` class should have a `super` call in the `__init__`\nmethod.\n\nWhen the quick-fix is applied, the code changes to:\n\n```\nclass Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n super().__init__()\n```" }, "defaultConfiguration": { "enabled": true, @@ -1611,13 +1611,13 @@ ] }, { - "id": "PyMissingConstructorInspection", + "id": "PyDefaultArgumentInspection", "shortDescription": { - "text": "Missed call to '__init__' of the super class" + "text": "The default argument is mutable" }, "fullDescription": { - "text": "Reports cases when a call to the 'super' constructor in a class is missed. Example: class Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n pass\n The 'Pear' class should have a 'super' call in the '__init__' method. When the quick-fix is applied, the code changes to: class Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n super().__init__()", - "markdown": "Reports cases when a call to the `super` constructor in a class is missed.\n\n**Example:**\n\n```\nclass Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n pass\n```\n\nThe `Pear` class should have a `super` call in the `__init__`\nmethod.\n\nWhen the quick-fix is applied, the code changes to:\n\n```\nclass Fruit:\n def __init__(self):\n pass\n\n\nclass Pear(Fruit):\n def __init__(self):\n super().__init__()\n```" + "text": "Reports a problem when a mutable value as a list or dictionary is detected in a default value for an argument. Default argument values are evaluated only once at function definition time, which means that modifying the default value of the argument will affect all subsequent calls of that function. Example: def func(s, cache={}):\n cache[s] = None\n When the quick-fix is applied, the code changes to: def func(s, cache=None):\n if cache is None:\n cache = {}\n cache[s] = None", + "markdown": "Reports a problem when a mutable value as a list or dictionary is detected in a default value for\nan argument. \n\nDefault argument values are evaluated only once at function definition time,\nwhich means that modifying the\ndefault value of the argument will affect all subsequent calls of that function.\n\n**Example:**\n\n```\ndef func(s, cache={}):\n cache[s] = None\n```\n\nWhen the quick-fix is applied, the code changes to:\n\n```\ndef func(s, cache=None):\n if cache is None:\n cache = {}\n cache[s] = None\n```" }, "defaultConfiguration": { "enabled": true, @@ -1952,13 +1952,13 @@ ] }, { - "id": "PyChainedComparisonsInspection", + "id": "PyMethodParametersInspection", "shortDescription": { - "text": "Too complex chained comparisons" + "text": "Improper first parameter" }, "fullDescription": { - "text": "Reports chained comparisons that can be simplified. Example: 'def do_comparison(x):\n xmin = 10\n xmax = 100\n if x >= xmin and x <= xmax:\n pass' The IDE offers to simplify 'if x >= xmin and x <= xmax'. 
When the quick-fix is applied, the code changes to: 'def do_comparison(x):\n xmin = 10\n xmax = 100\n if xmin <= x <= xmax:\n pass'", - "markdown": "Reports chained comparisons that can be simplified.\n\n**Example:**\n\n\n def do_comparison(x):\n xmin = 10\n xmax = 100\n if x >= xmin and x <= xmax:\n pass\n\nThe IDE offers to simplify `if x >= xmin and x <= xmax`.\nWhen the quick-fix is applied, the code changes to:\n\n\n def do_comparison(x):\n xmin = 10\n xmax = 100\n if xmin <= x <= xmax:\n pass\n" + "text": "Reports methods that lack the first parameter that is usually named 'self'. Example: class Movie:\n\n def show():\n pass\n When the quick-fix is applied, the code changes to: class Movie:\n\n def show(self):\n pass\n The inspection also reports naming issues in class methods. Example: class Movie:\n @classmethod\n def show(abc):\n pass\n Since the first parameter of a class method should be 'cls', the IDE provides a quick-fix to rename it.", + "markdown": "Reports methods that lack the first parameter that is usually\nnamed `self`.\n\n**Example:**\n\n```\nclass Movie:\n\n def show():\n pass\n```\n\nWhen the quick-fix is applied, the code changes to:\n\n```\nclass Movie:\n\n def show(self):\n pass\n```\n\nThe inspection also reports naming issues in class methods.\n\n**Example:**\n\n```\nclass Movie:\n @classmethod\n def show(abc):\n pass\n```\n\nSince the first parameter of a class method should be `cls`, the IDE provides a quick-fix\nto rename it." }, "defaultConfiguration": { "enabled": true, @@ -1983,13 +1983,13 @@ ] }, { - "id": "PyMethodParametersInspection", + "id": "PyChainedComparisonsInspection", "shortDescription": { - "text": "Improper first parameter" + "text": "Too complex chained comparisons" }, "fullDescription": { - "text": "Reports methods that lack the first parameter that is usually named 'self'. Example: class Movie:\n\n def show():\n pass\n When the quick-fix is applied, the code changes to: class Movie:\n\n def show(self):\n pass\n The inspection also reports naming issues in class methods. Example: class Movie:\n @classmethod\n def show(abc):\n pass\n Since the first parameter of a class method should be 'cls', the IDE provides a quick-fix to rename it.", - "markdown": "Reports methods that lack the first parameter that is usually\nnamed `self`.\n\n**Example:**\n\n```\nclass Movie:\n\n def show():\n pass\n```\n\nWhen the quick-fix is applied, the code changes to:\n\n```\nclass Movie:\n\n def show(self):\n pass\n```\n\nThe inspection also reports naming issues in class methods.\n\n**Example:**\n\n```\nclass Movie:\n @classmethod\n def show(abc):\n pass\n```\n\nSince the first parameter of a class method should be `cls`, the IDE provides a quick-fix\nto rename it." + "text": "Reports chained comparisons that can be simplified. Example: 'def do_comparison(x):\n xmin = 10\n xmax = 100\n if x >= xmin and x <= xmax:\n pass' The IDE offers to simplify 'if x >= xmin and x <= xmax'. 
When the quick-fix is applied, the code changes to: 'def do_comparison(x):\n xmin = 10\n xmax = 100\n if xmin <= x <= xmax:\n pass'", + "markdown": "Reports chained comparisons that can be simplified.\n\n**Example:**\n\n\n def do_comparison(x):\n xmin = 10\n xmax = 100\n if x >= xmin and x <= xmax:\n pass\n\nThe IDE offers to simplify `if x >= xmin and x <= xmax`.\nWhen the quick-fix is applied, the code changes to:\n\n\n def do_comparison(x):\n xmin = 10\n xmax = 100\n if xmin <= x <= xmax:\n pass\n" }, "defaultConfiguration": { "enabled": true, @@ -2324,19 +2324,19 @@ ] }, { - "id": "CythonUsageBeforeDeclarationInspection", + "id": "PyArgumentEqualDefaultInspection", "shortDescription": { - "text": "Cython variable is used before its declaration" + "text": "The function argument is equal to the default parameter value" }, "fullDescription": { - "text": "Reports Cython variables being referenced before declaration. Example: cdef int c_x\n\nprint(c_x, c_y) # Variable 'c_y' is used before its declaration\n\ncdef int c_y = 0", - "markdown": "Reports Cython variables being referenced before declaration.\n\n**Example:**\n\n```\ncdef int c_x\n\nprint(c_x, c_y) # Variable 'c_y' is used before its declaration\n\ncdef int c_y = 0\n```" + "text": "Reports a problem when an argument passed to the function is equal to the default parameter value. This inspection is disabled by default to avoid performance degradation. Example: def my_function(a: int = 2):\n print(a)\n\n\nmy_function(2)", + "markdown": "Reports a problem when an argument\npassed to the function is equal to the default parameter value.\n\nThis inspection is disabled by default to avoid performance degradation.\n\n**Example:**\n\n```\ndef my_function(a: int = 2):\n print(a)\n\n\nmy_function(2)\n```" }, "defaultConfiguration": { "enabled": false, - "level": "warning", + "level": "note", "parameters": { - "ideaSeverity": "WARNING" + "ideaSeverity": "WEAK WARNING" } }, "relationships": [ @@ -2355,19 +2355,19 @@ ] }, { - "id": "PyArgumentEqualDefaultInspection", + "id": "CythonUsageBeforeDeclarationInspection", "shortDescription": { - "text": "The function argument is equal to the default parameter value" + "text": "Cython variable is used before its declaration" }, "fullDescription": { - "text": "Reports a problem when an argument passed to the function is equal to the default parameter value. This inspection is disabled by default to avoid performance degradation. Example: def my_function(a: int = 2):\n print(a)\n\n\nmy_function(2)", - "markdown": "Reports a problem when an argument\npassed to the function is equal to the default parameter value.\n\nThis inspection is disabled by default to avoid performance degradation.\n\n**Example:**\n\n```\ndef my_function(a: int = 2):\n print(a)\n\n\nmy_function(2)\n```" + "text": "Reports Cython variables being referenced before declaration. 
Example: cdef int c_x\n\nprint(c_x, c_y) # Variable 'c_y' is used before its declaration\n\ncdef int c_y = 0", + "markdown": "Reports Cython variables being referenced before declaration.\n\n**Example:**\n\n```\ncdef int c_x\n\nprint(c_x, c_y) # Variable 'c_y' is used before its declaration\n\ncdef int c_y = 0\n```" }, "defaultConfiguration": { "enabled": false, - "level": "note", + "level": "warning", "parameters": { - "ideaSeverity": "WEAK WARNING" + "ideaSeverity": "WARNING" } }, "relationships": [ @@ -2913,19 +2913,19 @@ ] }, { - "id": "PyTrailingSemicolonInspection", + "id": "PyRedundantParenthesesInspection", "shortDescription": { - "text": "Prohibited trailing semicolon in a statement" + "text": "Redundant parentheses" }, "fullDescription": { - "text": "Reports trailing semicolons in statements. Example: def my_func(a):\n c = a ** 2;\n return c\n IDE provides a quick-fix that removes a trailing semicolon. When you apply it, the code changes to: def my_func(a):\n c = a ** 2\n return c", - "markdown": "Reports trailing semicolons in statements.\n\n**Example:**\n\n```\ndef my_func(a):\n c = a ** 2;\n return c\n```\n\nIDE provides a quick-fix that removes a trailing semicolon. When you\napply it, the code changes to:\n\n```\ndef my_func(a):\n c = a ** 2\n return c\n```" + "text": "Reports about redundant parentheses in expressions. The IDE provides the quick-fix action to remove the redundant parentheses.", + "markdown": "Reports about redundant parentheses in expressions.\n\nThe IDE provides the quick-fix action to remove the redundant parentheses." }, "defaultConfiguration": { "enabled": true, - "level": "warning", + "level": "note", "parameters": { - "ideaSeverity": "WARNING" + "ideaSeverity": "WEAK WARNING" } }, "relationships": [ @@ -2944,19 +2944,19 @@ ] }, { - "id": "PyRedundantParenthesesInspection", + "id": "PyTrailingSemicolonInspection", "shortDescription": { - "text": "Redundant parentheses" + "text": "Prohibited trailing semicolon in a statement" }, "fullDescription": { - "text": "Reports about redundant parentheses in expressions. The IDE provides the quick-fix action to remove the redundant parentheses.", - "markdown": "Reports about redundant parentheses in expressions.\n\nThe IDE provides the quick-fix action to remove the redundant parentheses." + "text": "Reports trailing semicolons in statements. Example: def my_func(a):\n c = a ** 2;\n return c\n IDE provides a quick-fix that removes a trailing semicolon. When you apply it, the code changes to: def my_func(a):\n c = a ** 2\n return c", + "markdown": "Reports trailing semicolons in statements.\n\n**Example:**\n\n```\ndef my_func(a):\n c = a ** 2;\n return c\n```\n\nIDE provides a quick-fix that removes a trailing semicolon. When you\napply it, the code changes to:\n\n```\ndef my_func(a):\n c = a ** 2\n return c\n```" }, "defaultConfiguration": { "enabled": true, - "level": "note", + "level": "warning", "parameters": { - "ideaSeverity": "WEAK WARNING" + "ideaSeverity": "WARNING" } }, "relationships": [ @@ -2975,19 +2975,19 @@ ] }, { - "id": "PyAbstractClassInspection", + "id": "PyOldStyleClassesInspection", "shortDescription": { - "text": "Class must implement all abstract methods" + "text": "Old-style class contains new-style class features" }, "fullDescription": { - "text": "Reports cases when not all abstract properties or methods are defined in a subclass. 
Example: from abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_triangle(self):\n pass\n When the quick-fix is applied, the IDE implements an abstract method for the 'Triangle' class: from abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_figure(self):\n pass\n\n def do_triangle(self):\n pass", - "markdown": "Reports cases when not all abstract properties or methods are defined in\na subclass.\n\n**Example:**\n\n```\nfrom abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_triangle(self):\n pass\n```\n\nWhen the quick-fix is applied, the IDE implements an abstract method for the `Triangle` class:\n\n```\nfrom abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_figure(self):\n pass\n\n def do_triangle(self):\n pass\n```" + "text": "Reports occurrences of new-style class features in old-style classes. The inspection highlights '__slots__', '__getattribute__', and 'super()' inside old-style classes.", + "markdown": "Reports occurrences of\n[new-style class features](https://www.python.org/doc/newstyle/)\nin old-style classes. The inspection highlights\n`__slots__`, `__getattribute__`, and `super()`\ninside old-style classes." }, "defaultConfiguration": { "enabled": true, - "level": "note", + "level": "warning", "parameters": { - "ideaSeverity": "WEAK WARNING" + "ideaSeverity": "WARNING" } }, "relationships": [ @@ -3006,19 +3006,19 @@ ] }, { - "id": "PyOldStyleClassesInspection", + "id": "PyAbstractClassInspection", "shortDescription": { - "text": "Old-style class contains new-style class features" + "text": "Class must implement all abstract methods" }, "fullDescription": { - "text": "Reports occurrences of new-style class features in old-style classes. The inspection highlights '__slots__', '__getattribute__', and 'super()' inside old-style classes.", - "markdown": "Reports occurrences of\n[new-style class features](https://www.python.org/doc/newstyle/)\nin old-style classes. The inspection highlights\n`__slots__`, `__getattribute__`, and `super()`\ninside old-style classes." + "text": "Reports cases when not all abstract properties or methods are defined in a subclass. 
Example: from abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_triangle(self):\n pass\n When the quick-fix is applied, the IDE implements an abstract method for the 'Triangle' class: from abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_figure(self):\n pass\n\n def do_triangle(self):\n pass", + "markdown": "Reports cases when not all abstract properties or methods are defined in\na subclass.\n\n**Example:**\n\n```\nfrom abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_triangle(self):\n pass\n```\n\nWhen the quick-fix is applied, the IDE implements an abstract method for the `Triangle` class:\n\n```\nfrom abc import abstractmethod, ABC\n\n\nclass Figure(ABC):\n\n @abstractmethod\n def do_figure(self):\n pass\n\n\nclass Triangle(Figure):\n def do_figure(self):\n pass\n\n def do_triangle(self):\n pass\n```" }, "defaultConfiguration": { "enabled": true, - "level": "warning", + "level": "note", "parameters": { - "ideaSeverity": "WARNING" + "ideaSeverity": "WEAK WARNING" } }, "relationships": [ @@ -3099,16 +3099,16 @@ ] }, { - "id": "Query_bound_parameters", + "id": "PyArgumentListInspection", "shortDescription": { - "text": "Query does not have required bound parameters" + "text": "Incorrect call arguments" }, "fullDescription": { - "text": "Reports GQL queries with bound parameters that don't have the necessary parameters passed to the query method call.", - "markdown": "Reports GQL queries with bound parameters that don't have the necessary\nparameters passed to the query method call." + "text": "Reports discrepancies between declared parameters and actual arguments, as well as incorrect arguments, for example, duplicate named arguments, and incorrect argument order. 
Example: class Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__() # unfilled parameter\nbar(5, \"#\") # unexpected argument\n The correct code fragment looks at follows: class Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__(5)\nbar(5, p2=\"#\")", + "markdown": "Reports discrepancies between declared parameters and actual arguments, as well as\nincorrect arguments, for example, duplicate named arguments, and incorrect argument order.\n\n**Example:**\n\n```\nclass Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__() # unfilled parameter\nbar(5, \"#\") # unexpected argument\n```\n\nThe correct code fragment looks at follows:\n\n```\nclass Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__(5)\nbar(5, p2=\"#\")\n```" }, "defaultConfiguration": { - "enabled": false, + "enabled": true, "level": "warning", "parameters": { "ideaSeverity": "WARNING" @@ -3117,8 +3117,8 @@ "relationships": [ { "target": { - "id": "Google App Engine (Python)", - "index": 4, + "id": "Python", + "index": 2, "toolComponent": { "name": "QDPY" } @@ -3130,16 +3130,16 @@ ] }, { - "id": "PyArgumentListInspection", + "id": "Query_bound_parameters", "shortDescription": { - "text": "Incorrect call arguments" + "text": "Query does not have required bound parameters" }, "fullDescription": { - "text": "Reports discrepancies between declared parameters and actual arguments, as well as incorrect arguments, for example, duplicate named arguments, and incorrect argument order. Example: class Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__() # unfilled parameter\nbar(5, \"#\") # unexpected argument\n The correct code fragment looks at follows: class Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__(5)\nbar(5, p2=\"#\")", - "markdown": "Reports discrepancies between declared parameters and actual arguments, as well as\nincorrect arguments, for example, duplicate named arguments, and incorrect argument order.\n\n**Example:**\n\n```\nclass Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__() # unfilled parameter\nbar(5, \"#\") # unexpected argument\n```\n\nThe correct code fragment looks at follows:\n\n```\nclass Foo:\n def __call__(self, p1: int, *, p2: str = \"%\"):\n return p2 * p1\n\n\nbar = Foo()\nbar.__call__(5)\nbar(5, p2=\"#\")\n```" + "text": "Reports GQL queries with bound parameters that don't have the necessary parameters passed to the query method call.", + "markdown": "Reports GQL queries with bound parameters that don't have the necessary\nparameters passed to the query method call." 
}, "defaultConfiguration": { - "enabled": true, + "enabled": false, "level": "warning", "parameters": { "ideaSeverity": "WARNING" @@ -3148,8 +3148,8 @@ "relationships": [ { "target": { - "id": "Python", - "index": 2, + "id": "Google App Engine (Python)", + "index": 4, "toolComponent": { "name": "QDPY" } @@ -3595,26 +3595,26 @@ ] }, { - "id": "PyProtectedMemberInspection", + "id": "DjangoCloseTagInspection", "shortDescription": { - "text": "Accessing a protected member of a class or a module" + "text": "Mismatched opening and closing tags" }, "fullDescription": { - "text": "Reports cases when a protected member is accessed outside the class, a descendant of the class where it is defined, or a module. class Foo:\n def _protected_method(self):\n pass\n\n\nclass Bar(Foo):\n def public_method(self):\n self._protected_method()\n\n\nfoo = Foo()\nfoo._protected_method() # Access to a protected method", - "markdown": "Reports cases when a protected member is accessed outside the class,\na descendant of the class where it is defined, or a module.\n\n```\nclass Foo:\n def _protected_method(self):\n pass\n\n\nclass Bar(Foo):\n def public_method(self):\n self._protected_method()\n\n\nfoo = Foo()\nfoo._protected_method() # Access to a protected method\n```" + "text": "Reports cases when opening tags in Django templates are not correctly matched by closing tags. Example: {% if error_message %}
<strong>
{{ error_message }}
</strong>
{% endif %}\n The IDE reports an error on the 'strong' tag not being closed.", + "markdown": "Reports cases when opening tags in Django templates are not correctly matched by closing tags.\n\n**Example:**\n\n```\n{% if error_message %}
<strong>
{{ error_message }}
</strong>
{% endif %}\n```\n\nThe IDE reports an error on the `strong` tag not being closed." }, "defaultConfiguration": { - "enabled": true, - "level": "note", + "enabled": false, + "level": "warning", "parameters": { - "ideaSeverity": "WEAK WARNING" + "ideaSeverity": "WARNING" } }, "relationships": [ { "target": { - "id": "Python", - "index": 2, + "id": "Django", + "index": 14, "toolComponent": { "name": "QDPY" } @@ -3626,26 +3626,26 @@ ] }, { - "id": "DjangoCloseTagInspection", + "id": "PyProtectedMemberInspection", "shortDescription": { - "text": "Mismatched opening and closing tags" + "text": "Accessing a protected member of a class or a module" }, "fullDescription": { - "text": "Reports cases when opening tags in Django templates are not correctly matched by closing tags. Example: {% if error_message %}
<strong>
{{ error_message }}
</strong>
{% endif %}\n The IDE reports an error on the 'strong' tag not being closed.", - "markdown": "Reports cases when opening tags in Django templates are not correctly matched by closing tags.\n\n**Example:**\n\n```\n{% if error_message %}
<strong>
{{ error_message }}
</strong>
{% endif %}\n```\n\nThe IDE reports an error on the `strong` tag not being closed." + "text": "Reports cases when a protected member is accessed outside the class, a descendant of the class where it is defined, or a module. class Foo:\n def _protected_method(self):\n pass\n\n\nclass Bar(Foo):\n def public_method(self):\n self._protected_method()\n\n\nfoo = Foo()\nfoo._protected_method() # Access to a protected method", + "markdown": "Reports cases when a protected member is accessed outside the class,\na descendant of the class where it is defined, or a module.\n\n```\nclass Foo:\n def _protected_method(self):\n pass\n\n\nclass Bar(Foo):\n def public_method(self):\n self._protected_method()\n\n\nfoo = Foo()\nfoo._protected_method() # Access to a protected method\n```" }, "defaultConfiguration": { - "enabled": false, - "level": "warning", + "enabled": true, + "level": "note", "parameters": { - "ideaSeverity": "WARNING" + "ideaSeverity": "WEAK WARNING" } }, "relationships": [ { "target": { - "id": "Django", - "index": 14, + "id": "Python", + "index": 2, "toolComponent": { "name": "QDPY" } @@ -4475,13 +4475,13 @@ ] }, { - "id": "MongoJSSideEffectsInspection", + "id": "SqlInsertIntoGeneratedColumnInspection", "shortDescription": { - "text": "Statement with side effects" + "text": "Insertion into generated columns" }, "fullDescription": { - "text": "Reports statements that can cause side effects while the data source is in read-only mode. For more information about enabling read-only mode, see Enable read-only mode for a connection in the IDE documentation. The Disable read-only mode quick-fix turns off the read-only mode for the respective data source. Example: 'db.my_collection.insertOne()'", - "markdown": "Reports statements that can cause side effects while the data source is in read-only mode.\n\nFor more information about enabling read-only mode, see\n[Enable\nread-only mode for a connection in the IDE documentation](https://www.jetbrains.com/help/datagrip/configuring-database-connections.html#enable-read-only-mode-for-a-connection).\n\nThe **Disable read-only mode** quick-fix turns off the read-only mode for the respective data source.\n\nExample:\n\n\n db.my_collection.insertOne()\n" + "text": "Reports INSERT statements that assign values to generated columns. Generated columns can be read, but their values can not be directly written. Example (PostgreSQL): 'CREATE TABLE foo\n(\n col1 INT,\n col2 INT GENERATED ALWAYS AS (col1 + 1) STORED\n);\nINSERT INTO foo(col1, col2) VALUES (1, 2);'\n You cannot insert '2' into the 'col2' column because this column is generated. For this script to work, you can change '2' to DEFAULT. 'INSERT INTO foo(col1, col2) VALUES (1, DEFAULT);'", + "markdown": "Reports INSERT statements that assign values to generated columns. 
Generated columns can be read, but their values can not be directly written.\n\nExample (PostgreSQL):\n\n CREATE TABLE foo\n (\n col1 INT,\n col2 INT GENERATED ALWAYS AS (col1 + 1) STORED\n );\n INSERT INTO foo(col1, col2) VALUES (1, 2);\n\nYou cannot insert `2` into the `col2` column because this column is generated.\nFor this script to work, you can change `2` to DEFAULT.\n`INSERT INTO foo(col1, col2) VALUES (1, DEFAULT);`" }, "defaultConfiguration": { "enabled": false, @@ -4493,8 +4493,8 @@ "relationships": [ { "target": { - "id": "MongoJS", - "index": 9, + "id": "SQL", + "index": 5, "toolComponent": { "name": "QDPY" } @@ -4506,13 +4506,13 @@ ] }, { - "id": "SqlInsertIntoGeneratedColumnInspection", + "id": "MongoJSSideEffectsInspection", "shortDescription": { - "text": "Insertion into generated columns" + "text": "Statement with side effects" }, "fullDescription": { - "text": "Reports INSERT statements that assign values to generated columns. Generated columns can be read, but their values can not be directly written. Example (PostgreSQL): 'CREATE TABLE foo\n(\n col1 INT,\n col2 INT GENERATED ALWAYS AS (col1 + 1) STORED\n);\nINSERT INTO foo(col1, col2) VALUES (1, 2);'\n You cannot insert '2' into the 'col2' column because this column is generated. For this script to work, you can change '2' to DEFAULT. 'INSERT INTO foo(col1, col2) VALUES (1, DEFAULT);'", - "markdown": "Reports INSERT statements that assign values to generated columns. Generated columns can be read, but their values can not be directly written.\n\nExample (PostgreSQL):\n\n CREATE TABLE foo\n (\n col1 INT,\n col2 INT GENERATED ALWAYS AS (col1 + 1) STORED\n );\n INSERT INTO foo(col1, col2) VALUES (1, 2);\n\nYou cannot insert `2` into the `col2` column because this column is generated.\nFor this script to work, you can change `2` to DEFAULT.\n`INSERT INTO foo(col1, col2) VALUES (1, DEFAULT);`" + "text": "Reports statements that can cause side effects while the data source is in read-only mode. For more information about enabling read-only mode, see Enable read-only mode for a connection in the IDE documentation. The Disable read-only mode quick-fix turns off the read-only mode for the respective data source. Example: 'db.my_collection.insertOne()'", + "markdown": "Reports statements that can cause side effects while the data source is in read-only mode.\n\nFor more information about enabling read-only mode, see\n[Enable\nread-only mode for a connection in the IDE documentation](https://www.jetbrains.com/help/datagrip/configuring-database-connections.html#enable-read-only-mode-for-a-connection).\n\nThe **Disable read-only mode** quick-fix turns off the read-only mode for the respective data source.\n\nExample:\n\n\n db.my_collection.insertOne()\n" }, "defaultConfiguration": { "enabled": false, @@ -4524,8 +4524,8 @@ "relationships": [ { "target": { - "id": "SQL", - "index": 5, + "id": "MongoJS", + "index": 9, "toolComponent": { "name": "QDPY" } @@ -4785,13 +4785,13 @@ ] }, { - "id": "SqlNamedArgumentsInspection", + "id": "SqlMultipleLimitClausesInspection", "shortDescription": { - "text": "Named arguments should be used" + "text": "Multiple row limiting/offset clauses in queries" }, "fullDescription": { - "text": "Reports arguments that are used without names in routine calls. By default, this inspection is disabled. For more information about the difference between named and unnamed parameters, see Binding Parameters by Name (Named Parameters) at docs.microsoft.com . 
Example (Microsoft SQL Server): 'CREATE FUNCTION foo(n INT, m INT) RETURNS INT AS\nBEGIN\n RETURN n + m;\nEND;\n\nCREATE PROCEDURE test AS\nBEGIN\n foo n = 1, m = 2;\n\n--- The following call misses parameter names and will be highlighted\n foo 1, 2;\nEND;' Parameters '1, 2' in the 'foo 1, 2;' call are highlighted because they miss names.", - "markdown": "Reports arguments that are used without names in routine calls. By default, this inspection is disabled.\n\nFor more information about the difference between named and unnamed parameters, see [Binding Parameters by Name (Named Parameters) at docs.microsoft.com](https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/binding-parameters-by-name-named-parameters).\n\nExample (Microsoft SQL Server):\n\n CREATE FUNCTION foo(n INT, m INT) RETURNS INT AS\n BEGIN\n RETURN n + m;\n END;\n\n CREATE PROCEDURE test AS\n BEGIN\n foo n = 1, m = 2;\n\n --- The following call misses parameter names and will be highlighted\n foo 1, 2;\n END;\n\nParameters `1, 2` in the `foo 1, 2;` call are highlighted because they miss names." + "text": "Reports usages of multiple row limiting clauses in a single query. Example (Microsoft SQL Server): 'create table foo(a int);\nselect top 1 * from foo order by a offset 10 rows fetch next 20 rows only;' The SELECT TOP clause is used to specify that only 1 record must be returned. The FETCH clause specifies the number of rows to return after the OFFSET clause has been processed. But as we already have the SELECT TOP limiting clause, the FETCH clause might be redundant.", + "markdown": "Reports usages of multiple row limiting clauses in a single query.\n\nExample (Microsoft SQL Server):\n\n create table foo(a int);\n select top 1 * from foo order by a offset 10 rows fetch next 20 rows only;\n\nThe SELECT TOP clause is used to specify that only 1 record must be\nreturned. The FETCH clause specifies the number of rows to return after the OFFSET\nclause has been processed. But as we already have the SELECT TOP limiting clause, the FETCH clause might be redundant." }, "defaultConfiguration": { "enabled": false, @@ -4816,13 +4816,13 @@ ] }, { - "id": "SqlMultipleLimitClausesInspection", + "id": "SqlNamedArgumentsInspection", "shortDescription": { - "text": "Multiple row limiting/offset clauses in queries" + "text": "Named arguments should be used" }, "fullDescription": { - "text": "Reports usages of multiple row limiting clauses in a single query. Example (Microsoft SQL Server): 'create table foo(a int);\nselect top 1 * from foo order by a offset 10 rows fetch next 20 rows only;' The SELECT TOP clause is used to specify that only 1 record must be returned. The FETCH clause specifies the number of rows to return after the OFFSET clause has been processed. But as we already have the SELECT TOP limiting clause, the FETCH clause might be redundant.", - "markdown": "Reports usages of multiple row limiting clauses in a single query.\n\nExample (Microsoft SQL Server):\n\n create table foo(a int);\n select top 1 * from foo order by a offset 10 rows fetch next 20 rows only;\n\nThe SELECT TOP clause is used to specify that only 1 record must be\nreturned. The FETCH clause specifies the number of rows to return after the OFFSET\nclause has been processed. But as we already have the SELECT TOP limiting clause, the FETCH clause might be redundant." + "text": "Reports arguments that are used without names in routine calls. By default, this inspection is disabled. 
For more information about the difference between named and unnamed parameters, see Binding Parameters by Name (Named Parameters) at docs.microsoft.com . Example (Microsoft SQL Server): 'CREATE FUNCTION foo(n INT, m INT) RETURNS INT AS\nBEGIN\n RETURN n + m;\nEND;\n\nCREATE PROCEDURE test AS\nBEGIN\n foo n = 1, m = 2;\n\n--- The following call misses parameter names and will be highlighted\n foo 1, 2;\nEND;' Parameters '1, 2' in the 'foo 1, 2;' call are highlighted because they miss names.", + "markdown": "Reports arguments that are used without names in routine calls. By default, this inspection is disabled.\n\nFor more information about the difference between named and unnamed parameters, see [Binding Parameters by Name (Named Parameters) at docs.microsoft.com](https://docs.microsoft.com/en-us/sql/odbc/reference/develop-app/binding-parameters-by-name-named-parameters).\n\nExample (Microsoft SQL Server):\n\n CREATE FUNCTION foo(n INT, m INT) RETURNS INT AS\n BEGIN\n RETURN n + m;\n END;\n\n CREATE PROCEDURE test AS\n BEGIN\n foo n = 1, m = 2;\n\n --- The following call misses parameter names and will be highlighted\n foo 1, 2;\n END;\n\nParameters `1, 2` in the `foo 1, 2;` call are highlighted because they miss names." }, "defaultConfiguration": { "enabled": false, @@ -4971,13 +4971,13 @@ ] }, { - "id": "SqlRedundantAliasInspection", + "id": "SqlIdentifierInspection", "shortDescription": { - "text": "Redundant alias expressions" + "text": "Identifier should be quoted" }, "fullDescription": { - "text": "Reports alias expressions that duplicate names of columns in tables and might be redundant. Example (PostgreSQL): 'CREATE TABLE foo(a INT, b INT);\n\nSELECT * FROM foo foo(a, b);\nSELECT * FROM foo foo(a);\nSELECT * FROM foo foo(x);\nSELECT * FROM foo foo(x, y);' The first two aliases use the same column names as in the 'foo' table. They are considered redundant because they column names are identical.", - "markdown": "Reports alias expressions that duplicate names of columns in tables and might be redundant.\n\nExample (PostgreSQL):\n\n CREATE TABLE foo(a INT, b INT);\n\n SELECT * FROM foo foo(a, b);\n SELECT * FROM foo foo(a);\n SELECT * FROM foo foo(x);\n SELECT * FROM foo foo(x, y);\n\nThe first two aliases use the same column names as in the `foo` table. They are considered redundant because they\ncolumn names are identical." + "text": "Reports situations when you use SQL reserved keywords as identifier names in your query. Example (Microsoft SQL Server): 'CREATE TABLE select (identity INT IDENTITY NOT NULL, order INT NOT NULL);' We use 'select', 'identity', and 'order' as table and column names. But they are also reserved keywords in Microsoft SQL Server. Therefore, in order to use them as object names in the query, you must quote these identifiers. To quote them, you can use the Quote identifier quick-fix. After the quick-fix is applied: 'CREATE TABLE [select] ([identity] INT IDENTITY NOT NULL, [order] INT NOT NULL);'", + "markdown": "Reports situations when you use SQL reserved keywords as identifier names in your query.\n\nExample (Microsoft SQL Server):\n\n CREATE TABLE select (identity INT IDENTITY NOT NULL, order INT NOT NULL);\n\nWe use `select`, `identity`, and `order` as table and column names.\nBut they are also reserved keywords in Microsoft SQL Server.\nTherefore, in order to use them as object names in the query, you must quote these identifiers. 
To quote them, you can use the\n**Quote identifier** quick-fix.\n\nAfter the quick-fix is applied:\n\n CREATE TABLE [select] ([identity] INT IDENTITY NOT NULL, [order] INT NOT NULL);\n" }, "defaultConfiguration": { "enabled": false, @@ -5002,13 +5002,13 @@ ] }, { - "id": "SqlIdentifierInspection", + "id": "SqlRedundantAliasInspection", "shortDescription": { - "text": "Identifier should be quoted" + "text": "Redundant alias expressions" }, "fullDescription": { - "text": "Reports situations when you use SQL reserved keywords as identifier names in your query. Example (Microsoft SQL Server): 'CREATE TABLE select (identity INT IDENTITY NOT NULL, order INT NOT NULL);' We use 'select', 'identity', and 'order' as table and column names. But they are also reserved keywords in Microsoft SQL Server. Therefore, in order to use them as object names in the query, you must quote these identifiers. To quote them, you can use the Quote identifier quick-fix. After the quick-fix is applied: 'CREATE TABLE [select] ([identity] INT IDENTITY NOT NULL, [order] INT NOT NULL);'", - "markdown": "Reports situations when you use SQL reserved keywords as identifier names in your query.\n\nExample (Microsoft SQL Server):\n\n CREATE TABLE select (identity INT IDENTITY NOT NULL, order INT NOT NULL);\n\nWe use `select`, `identity`, and `order` as table and column names.\nBut they are also reserved keywords in Microsoft SQL Server.\nTherefore, in order to use them as object names in the query, you must quote these identifiers. To quote them, you can use the\n**Quote identifier** quick-fix.\n\nAfter the quick-fix is applied:\n\n CREATE TABLE [select] ([identity] INT IDENTITY NOT NULL, [order] INT NOT NULL);\n" + "text": "Reports alias expressions that duplicate names of columns in tables and might be redundant. Example (PostgreSQL): 'CREATE TABLE foo(a INT, b INT);\n\nSELECT * FROM foo foo(a, b);\nSELECT * FROM foo foo(a);\nSELECT * FROM foo foo(x);\nSELECT * FROM foo foo(x, y);' The first two aliases use the same column names as in the 'foo' table. They are considered redundant because they column names are identical.", + "markdown": "Reports alias expressions that duplicate names of columns in tables and might be redundant.\n\nExample (PostgreSQL):\n\n CREATE TABLE foo(a INT, b INT);\n\n SELECT * FROM foo foo(a, b);\n SELECT * FROM foo foo(a);\n SELECT * FROM foo foo(x);\n SELECT * FROM foo foo(x, y);\n\nThe first two aliases use the same column names as in the `foo` table. They are considered redundant because they\ncolumn names are identical." }, "defaultConfiguration": { "enabled": false, @@ -5188,13 +5188,13 @@ ] }, { - "id": "SqlSideEffectsInspection", + "id": "SqlDtInspection", "shortDescription": { - "text": "Statement with side effects" + "text": "Ill-formed date/time literals" }, "fullDescription": { - "text": "Reports statements that might lead to modification of a database during a read-only connection. To enable read-only mode for a connection, right-click a data source in the Database tool window (View | Tool Windows | Database) and select Properties. In the Data Sources and Drivers dialog, click the Options tab and select the Read-only checkbox. 
Example (MySQL): 'CREATE TABLE foo(a INT);\nINSERT INTO foo VALUES (1);' As 'CREATE TABLE' and 'INSERT INTO' statements lead to a database modification, these statements will be highlighted in read-only connection mode.", - "markdown": "Reports statements that might lead to modification of a database during a read-only connection.\n\nTo enable read-only mode for a\nconnection,\nright-click a data source in the **Database** tool window (**View \\| Tool Windows \\| Database** ) and select **Properties** .\nIn the **Data Sources and Drivers** dialog, click the **Options** tab and select the **Read-only** checkbox.\n\nExample (MySQL):\n\n CREATE TABLE foo(a INT);\n INSERT INTO foo VALUES (1);\n\nAs `CREATE TABLE` and `INSERT INTO` statements lead to a database modification, these statements will be highlighted\nin read-only connection mode." + "text": "Reports errors in date and time literals. This inspection is available in MySQL, Oracle, Db2, and H2. Example (MySQL): 'SELECT TIME '10 -12:13:14' FROM dual;\nSELECT TIME ' 12 : 13 : 14 ' FROM dual;\nSELECT TIME '12 13 14' FROM dual;\nSELECT TIME '12-13-14' FROM dual;\nSELECT TIME '12.13.14' FROM dual;\nSELECT TIME '12:13:' FROM dual;\nSELECT TIME '12:13' FROM dual;\nSELECT TIME '12:' FROM dual;' In this example, dates ignore the MySQL standard for date and time literals. Therefore, they will be highlighted. For more information about date and time literals in MySQL, see Date and Time Literals at dev.mysql.com. The following date and type literals are valid for MySQL. 'SELECT TIME '12:13:14' FROM dual;\nSELECT TIME '12:13:14.555' FROM dual;\nSELECT TIME '12:13:14.' FROM dual;\nSELECT TIME '-12:13:14' FROM dual;\nSELECT TIME '10 12:13:14' FROM dual;\nSELECT TIME '-10 12:13:14' FROM dual;'", + "markdown": "Reports errors in date and time literals. This inspection is available in MySQL, Oracle, Db2, and H2.\n\nExample (MySQL):\n\n SELECT TIME '10 -12:13:14' FROM dual;\n SELECT TIME ' 12 : 13 : 14 ' FROM dual;\n SELECT TIME '12 13 14' FROM dual;\n SELECT TIME '12-13-14' FROM dual;\n SELECT TIME '12.13.14' FROM dual;\n SELECT TIME '12:13:' FROM dual;\n SELECT TIME '12:13' FROM dual;\n SELECT TIME '12:' FROM dual;\n\nIn this example, dates ignore the MySQL standard for date and time literals. Therefore, they will be highlighted.\nFor more information about date and time literals in MySQL, see [Date and Time Literals at dev.mysql.com](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-literals.html).\n\nThe following date and type literals are valid for MySQL.\n\n SELECT TIME '12:13:14' FROM dual;\n SELECT TIME '12:13:14.555' FROM dual;\n SELECT TIME '12:13:14.' FROM dual;\n SELECT TIME '-12:13:14' FROM dual;\n SELECT TIME '10 12:13:14' FROM dual;\n SELECT TIME '-10 12:13:14' FROM dual;\n" }, "defaultConfiguration": { "enabled": false, @@ -5219,13 +5219,13 @@ ] }, { - "id": "SqlDtInspection", + "id": "SqlSideEffectsInspection", "shortDescription": { - "text": "Ill-formed date/time literals" + "text": "Statement with side effects" }, "fullDescription": { - "text": "Reports errors in date and time literals. This inspection is available in MySQL, Oracle, Db2, and H2. Example (MySQL): 'SELECT TIME '10 -12:13:14' FROM dual;\nSELECT TIME ' 12 : 13 : 14 ' FROM dual;\nSELECT TIME '12 13 14' FROM dual;\nSELECT TIME '12-13-14' FROM dual;\nSELECT TIME '12.13.14' FROM dual;\nSELECT TIME '12:13:' FROM dual;\nSELECT TIME '12:13' FROM dual;\nSELECT TIME '12:' FROM dual;' In this example, dates ignore the MySQL standard for date and time literals. 
Therefore, they will be highlighted. For more information about date and time literals in MySQL, see Date and Time Literals at dev.mysql.com. The following date and type literals are valid for MySQL. 'SELECT TIME '12:13:14' FROM dual;\nSELECT TIME '12:13:14.555' FROM dual;\nSELECT TIME '12:13:14.' FROM dual;\nSELECT TIME '-12:13:14' FROM dual;\nSELECT TIME '10 12:13:14' FROM dual;\nSELECT TIME '-10 12:13:14' FROM dual;'", - "markdown": "Reports errors in date and time literals. This inspection is available in MySQL, Oracle, Db2, and H2.\n\nExample (MySQL):\n\n SELECT TIME '10 -12:13:14' FROM dual;\n SELECT TIME ' 12 : 13 : 14 ' FROM dual;\n SELECT TIME '12 13 14' FROM dual;\n SELECT TIME '12-13-14' FROM dual;\n SELECT TIME '12.13.14' FROM dual;\n SELECT TIME '12:13:' FROM dual;\n SELECT TIME '12:13' FROM dual;\n SELECT TIME '12:' FROM dual;\n\nIn this example, dates ignore the MySQL standard for date and time literals. Therefore, they will be highlighted.\nFor more information about date and time literals in MySQL, see [Date and Time Literals at dev.mysql.com](https://dev.mysql.com/doc/refman/8.0/en/date-and-time-literals.html).\n\nThe following date and type literals are valid for MySQL.\n\n SELECT TIME '12:13:14' FROM dual;\n SELECT TIME '12:13:14.555' FROM dual;\n SELECT TIME '12:13:14.' FROM dual;\n SELECT TIME '-12:13:14' FROM dual;\n SELECT TIME '10 12:13:14' FROM dual;\n SELECT TIME '-10 12:13:14' FROM dual;\n" + "text": "Reports statements that might lead to modification of a database during a read-only connection. To enable read-only mode for a connection, right-click a data source in the Database tool window (View | Tool Windows | Database) and select Properties. In the Data Sources and Drivers dialog, click the Options tab and select the Read-only checkbox. Example (MySQL): 'CREATE TABLE foo(a INT);\nINSERT INTO foo VALUES (1);' As 'CREATE TABLE' and 'INSERT INTO' statements lead to a database modification, these statements will be highlighted in read-only connection mode.", + "markdown": "Reports statements that might lead to modification of a database during a read-only connection.\n\nTo enable read-only mode for a\nconnection,\nright-click a data source in the **Database** tool window (**View \\| Tool Windows \\| Database** ) and select **Properties** .\nIn the **Data Sources and Drivers** dialog, click the **Options** tab and select the **Read-only** checkbox.\n\nExample (MySQL):\n\n CREATE TABLE foo(a INT);\n INSERT INTO foo VALUES (1);\n\nAs `CREATE TABLE` and `INSERT INTO` statements lead to a database modification, these statements will be highlighted\nin read-only connection mode." }, "defaultConfiguration": { "enabled": false, @@ -5622,26 +5622,26 @@ ] }, { - "id": "SqlCallNotationInspection", + "id": "MysqlParsingInspection", "shortDescription": { - "text": "Using of named and positional arguments" + "text": "Unsupported syntax in pre-8.0 versions" }, "fullDescription": { - "text": "Reports calls in which positional arguments go after the named ones. Works in PostgreSQL, Oracle, and Db2. Example (In PostgreSQL): 'CREATE FUNCTION foo(a int, b int, c int) RETURNS int\n LANGUAGE plpgsql AS\n$$\nBEGIN\n RETURN a + b + c;\nEND\n$$;\nSELECT foo(a => 1, b => 2, c => 3);\n -- `3` goes after the named argument\nSELECT foo(1, b => 2, 3);\n -- `1` and `3` go after the named argument\nSELECT foo(b => 2, 1, 3);'", - "markdown": "Reports calls in which positional arguments go after the named ones. 
Works in PostgreSQL, Oracle, and Db2.\n\nExample (In PostgreSQL):\n\n CREATE FUNCTION foo(a int, b int, c int) RETURNS int\n LANGUAGE plpgsql AS\n $$\n BEGIN\n RETURN a + b + c;\n END\n $$;\n SELECT foo(a => 1, b => 2, c => 3);\n -- `3` goes after the named argument\n SELECT foo(1, b => 2, 3);\n -- `1` and `3` go after the named argument\n SELECT foo(b => 2, 1, 3);\n" + "text": "Reports invalid usages of UNION in queries. The inspection works in MySQL versions that are earlier than 8.0. Example (MySQL): 'SELECT * FROM (SELECT 1 UNION (SELECT 1 UNION SELECT 2)) a;'", + "markdown": "Reports invalid usages of UNION in queries.\n\nThe inspection works in MySQL versions that are earlier than 8.0.\n\nExample (MySQL):\n\n\n SELECT * FROM (SELECT 1 UNION (SELECT 1 UNION SELECT 2)) a;\n" }, "defaultConfiguration": { "enabled": false, - "level": "error", + "level": "warning", "parameters": { - "ideaSeverity": "ERROR" + "ideaSeverity": "WARNING" } }, "relationships": [ { "target": { - "id": "SQL", - "index": 5, + "id": "MySQL", + "index": 11, "toolComponent": { "name": "QDPY" } @@ -5653,26 +5653,26 @@ ] }, { - "id": "MysqlParsingInspection", + "id": "SqlCallNotationInspection", "shortDescription": { - "text": "Unsupported syntax in pre-8.0 versions" + "text": "Using of named and positional arguments" }, "fullDescription": { - "text": "Reports invalid usages of UNION in queries. The inspection works in MySQL versions that are earlier than 8.0. Example (MySQL): 'SELECT * FROM (SELECT 1 UNION (SELECT 1 UNION SELECT 2)) a;'", - "markdown": "Reports invalid usages of UNION in queries.\n\nThe inspection works in MySQL versions that are earlier than 8.0.\n\nExample (MySQL):\n\n\n SELECT * FROM (SELECT 1 UNION (SELECT 1 UNION SELECT 2)) a;\n" + "text": "Reports calls in which positional arguments go after the named ones. Works in PostgreSQL, Oracle, and Db2. Example (In PostgreSQL): 'CREATE FUNCTION foo(a int, b int, c int) RETURNS int\n LANGUAGE plpgsql AS\n$$\nBEGIN\n RETURN a + b + c;\nEND\n$$;\nSELECT foo(a => 1, b => 2, c => 3);\n -- `3` goes after the named argument\nSELECT foo(1, b => 2, 3);\n -- `1` and `3` go after the named argument\nSELECT foo(b => 2, 1, 3);'", + "markdown": "Reports calls in which positional arguments go after the named ones. Works in PostgreSQL, Oracle, and Db2.\n\nExample (In PostgreSQL):\n\n CREATE FUNCTION foo(a int, b int, c int) RETURNS int\n LANGUAGE plpgsql AS\n $$\n BEGIN\n RETURN a + b + c;\n END\n $$;\n SELECT foo(a => 1, b => 2, c => 3);\n -- `3` goes after the named argument\n SELECT foo(1, b => 2, 3);\n -- `1` and `3` go after the named argument\n SELECT foo(b => 2, 1, 3);\n" }, "defaultConfiguration": { "enabled": false, - "level": "warning", + "level": "error", "parameters": { - "ideaSeverity": "WARNING" + "ideaSeverity": "ERROR" } }, "relationships": [ { "target": { - "id": "MySQL", - "index": 11, + "id": "SQL", + "index": 5, "toolComponent": { "name": "QDPY" } @@ -6378,26 +6378,26 @@ ] }, { - "id": "XmlDuplicatedId", + "id": "JsonSchemaDeprecation", "shortDescription": { - "text": "Duplicate 'id' attribute" + "text": "Deprecated JSON property" }, "fullDescription": { - "text": "Reports a duplicate 'id' attribute in XML.", - "markdown": "Reports a duplicate `id` attribute in XML." + "text": "Reports a deprecated property in a JSON file. Note that deprecation mechanism is not defined in the JSON Schema specification yet, and this inspection uses a non-standard extension 'deprecationMessage'.", + "markdown": "Reports a deprecated property in a JSON file. 
\nNote that deprecation mechanism is not defined in the JSON Schema specification yet, and this inspection uses a non-standard extension 'deprecationMessage'." }, "defaultConfiguration": { "enabled": false, - "level": "error", + "level": "note", "parameters": { - "ideaSeverity": "ERROR" + "ideaSeverity": "WEAK WARNING" } }, "relationships": [ { "target": { - "id": "XML", - "index": 6, + "id": "JSON and JSON5", + "index": 8, "toolComponent": { "name": "QDPY" } @@ -6409,26 +6409,26 @@ ] }, { - "id": "JsonSchemaDeprecation", + "id": "XmlDuplicatedId", "shortDescription": { - "text": "Deprecated JSON property" + "text": "Duplicate 'id' attribute" }, "fullDescription": { - "text": "Reports a deprecated property in a JSON file. Note that deprecation mechanism is not defined in the JSON Schema specification yet, and this inspection uses a non-standard extension 'deprecationMessage'.", - "markdown": "Reports a deprecated property in a JSON file. \nNote that deprecation mechanism is not defined in the JSON Schema specification yet, and this inspection uses a non-standard extension 'deprecationMessage'." + "text": "Reports a duplicate 'id' attribute in XML.", + "markdown": "Reports a duplicate `id` attribute in XML." }, "defaultConfiguration": { "enabled": false, - "level": "note", + "level": "error", "parameters": { - "ideaSeverity": "WEAK WARNING" + "ideaSeverity": "ERROR" } }, "relationships": [ { "target": { - "id": "JSON and JSON5", - "index": 8, + "id": "XML", + "index": 6, "toolComponent": { "name": "QDPY" } @@ -6595,26 +6595,26 @@ ] }, { - "id": "DuplicatedCode", + "id": "RequiredAttributes", "shortDescription": { - "text": "Duplicated code fragment" + "text": "Missing required attribute" }, "fullDescription": { - "text": "Reports duplicated blocks of code from the selected scope: the same file or the entire project. The inspection features quick-fixes that help you to set the size of detected duplicates, navigate to repetitive code fragments, and compare them in a tool window. The inspection options allow you to select the scope of the reported duplicated fragments and set the initial size for the duplicated language constructs. You can also configure the constructs that you want to anonymize in File | Settings | Editor | Duplicates.", - "markdown": "Reports duplicated blocks of code from the selected scope: the same file or the entire project. The inspection features quick-fixes that help you to set the size of detected duplicates, navigate to repetitive code fragments, and compare them in a tool window. The inspection options allow you to select the scope of the reported duplicated fragments and set the initial size for the duplicated language constructs. You can also configure the constructs that you want to anonymize in [File \\| Settings \\| Editor \\| Duplicates](settings://duplicates.index)." + "text": "Reports a missing mandatory attribute in an XML/HTML tag. Suggests configuring attributes that should not be reported.", + "markdown": "Reports a missing mandatory attribute in an XML/HTML tag. Suggests configuring attributes that should not be reported." 
}, "defaultConfiguration": { "enabled": false, - "level": "note", + "level": "warning", "parameters": { - "ideaSeverity": "WEAK WARNING" + "ideaSeverity": "WARNING" } }, "relationships": [ { "target": { - "id": "General", - "index": 17, + "id": "HTML", + "index": 15, "toolComponent": { "name": "QDPY" } @@ -6626,26 +6626,26 @@ ] }, { - "id": "RequiredAttributes", + "id": "DuplicatedCode", "shortDescription": { - "text": "Missing required attribute" + "text": "Duplicated code fragment" }, "fullDescription": { - "text": "Reports a missing mandatory attribute in an XML/HTML tag. Suggests configuring attributes that should not be reported.", - "markdown": "Reports a missing mandatory attribute in an XML/HTML tag. Suggests configuring attributes that should not be reported." + "text": "Reports duplicated blocks of code from the selected scope: the same file or the entire project. The inspection features quick-fixes that help you to set the size of detected duplicates, navigate to repetitive code fragments, and compare them in a tool window. The inspection options allow you to select the scope of the reported duplicated fragments and set the initial size for the duplicated language constructs. You can also configure the constructs that you want to anonymize in File | Settings | Editor | Duplicates.", + "markdown": "Reports duplicated blocks of code from the selected scope: the same file or the entire project. The inspection features quick-fixes that help you to set the size of detected duplicates, navigate to repetitive code fragments, and compare them in a tool window. The inspection options allow you to select the scope of the reported duplicated fragments and set the initial size for the duplicated language constructs. You can also configure the constructs that you want to anonymize in [File \\| Settings \\| Editor \\| Duplicates](settings://duplicates.index)." }, "defaultConfiguration": { "enabled": false, - "level": "warning", + "level": "note", "parameters": { - "ideaSeverity": "WARNING" + "ideaSeverity": "WEAK WARNING" } }, "relationships": [ { "target": { - "id": "HTML", - "index": 15, + "id": "General", + "index": 17, "toolComponent": { "name": "QDPY" } @@ -7525,16 +7525,16 @@ ] }, { - "id": "RegExpRedundantNestedCharacterClass", + "id": "XmlDeprecatedElement", "shortDescription": { - "text": "Redundant nested character class" + "text": "Deprecated symbol" }, "fullDescription": { - "text": "Reports unnecessary nested character classes. Example: '[a-c[x-z]]' After the quick-fix is applied: '[a-cx-z]' New in 2020.2", - "markdown": "Reports unnecessary nested character classes.\n\n**Example:**\n\n\n [a-c[x-z]]\n\nAfter the quick-fix is applied:\n\n\n [a-cx-z]\n\nNew in 2020.2" + "text": "Reports a deprecated XML element or attribute. Symbols can be marked by XML comment or documentation tag with text 'deprecated'.", + "markdown": "Reports a deprecated XML element or attribute.\n\nSymbols can be marked by XML comment or documentation tag with text 'deprecated'." 
}, "defaultConfiguration": { - "enabled": true, + "enabled": false, "level": "warning", "parameters": { "ideaSeverity": "WARNING" @@ -7543,8 +7543,8 @@ "relationships": [ { "target": { - "id": "RegExp", - "index": 10, + "id": "XML", + "index": 6, "toolComponent": { "name": "QDPY" } @@ -7556,16 +7556,16 @@ ] }, { - "id": "XmlDeprecatedElement", + "id": "RegExpRedundantNestedCharacterClass", "shortDescription": { - "text": "Deprecated symbol" + "text": "Redundant nested character class" }, "fullDescription": { - "text": "Reports a deprecated XML element or attribute. Symbols can be marked by XML comment or documentation tag with text 'deprecated'.", - "markdown": "Reports a deprecated XML element or attribute.\n\nSymbols can be marked by XML comment or documentation tag with text 'deprecated'." + "text": "Reports unnecessary nested character classes. Example: '[a-c[x-z]]' After the quick-fix is applied: '[a-cx-z]' New in 2020.2", + "markdown": "Reports unnecessary nested character classes.\n\n**Example:**\n\n\n [a-c[x-z]]\n\nAfter the quick-fix is applied:\n\n\n [a-cx-z]\n\nNew in 2020.2" }, "defaultConfiguration": { - "enabled": false, + "enabled": true, "level": "warning", "parameters": { "ideaSeverity": "WARNING" @@ -7574,8 +7574,8 @@ "relationships": [ { "target": { - "id": "XML", - "index": 6, + "id": "RegExp", + "index": 10, "toolComponent": { "name": "QDPY" } @@ -7680,26 +7680,26 @@ ] }, { - "id": "HtmlExtraClosingTag", + "id": "RegExpOctalEscape", "shortDescription": { - "text": "Redundant closing tag" + "text": "Octal escape" }, "fullDescription": { - "text": "Reports redundant closing tags on empty elements, for example, 'img' or 'br'. Example: '\n \n
<html>\n <body>\n <br></br>\n </body>\n</html>' After the quick-fix is applied: '<html>\n <body>\n <br>\n </body>\n</html>'", "markdown": "Reports redundant closing tags on empty elements, for example, `img` or `br`.\n\n**Example:**\n\n\n <html>\n <body>\n <br></br>\n </body>\n </html>\n\nAfter the quick-fix is applied:\n\n\n <html>\n <body>\n <br>\n </body>\n </html>
\n \n \n" + "text": "Reports octal escapes, which are easily confused with back references. Use hexadecimal escapes to avoid confusion. Example: '\\07' After the quick-fix is applied: '\\x07' New in 2017.1", + "markdown": "Reports octal escapes, which are easily confused with back references. Use hexadecimal escapes to avoid confusion.\n\n**Example:**\n\n\n \\07\n\nAfter the quick-fix is applied:\n\n\n \\x07\n\nNew in 2017.1" }, "defaultConfiguration": { - "enabled": false, - "level": "error", + "enabled": true, + "level": "note", "parameters": { - "ideaSeverity": "ERROR" + "ideaSeverity": "INFORMATION" } }, "relationships": [ { "target": { - "id": "HTML", - "index": 15, + "id": "RegExp", + "index": 10, "toolComponent": { "name": "QDPY" } @@ -7711,26 +7711,26 @@ ] }, { - "id": "RegExpOctalEscape", + "id": "HtmlExtraClosingTag", "shortDescription": { - "text": "Octal escape" + "text": "Redundant closing tag" }, "fullDescription": { - "text": "Reports octal escapes, which are easily confused with back references. Use hexadecimal escapes to avoid confusion. Example: '\\07' After the quick-fix is applied: '\\x07' New in 2017.1", - "markdown": "Reports octal escapes, which are easily confused with back references. Use hexadecimal escapes to avoid confusion.\n\n**Example:**\n\n\n \\07\n\nAfter the quick-fix is applied:\n\n\n \\x07\n\nNew in 2017.1" + "text": "Reports redundant closing tags on empty elements, for example, 'img' or 'br'. Example: '\n \n
<html>\n <body>\n <br></br>\n </body>\n</html>' After the quick-fix is applied: '<html>\n <body>\n <br>\n </body>\n</html>'", "markdown": "Reports redundant closing tags on empty elements, for example, `img` or `br`.\n\n**Example:**\n\n\n <html>\n <body>\n <br></br>\n </body>\n </html>\n\nAfter the quick-fix is applied:\n\n\n <html>\n <body>\n <br>\n </body>\n </html>
\n \n \n" }, "defaultConfiguration": { - "enabled": true, - "level": "note", + "enabled": false, + "level": "error", "parameters": { - "ideaSeverity": "INFORMATION" + "ideaSeverity": "ERROR" } }, "relationships": [ { "target": { - "id": "RegExp", - "index": 10, + "id": "HTML", + "index": 15, "toolComponent": { "name": "QDPY" } @@ -7742,13 +7742,13 @@ ] }, { - "id": "HtmlUnknownAnchorTarget", + "id": "UnusedDefine", "shortDescription": { - "text": "Unresolved fragment in a link" + "text": "Unused define" }, "fullDescription": { - "text": "Reports an unresolved last part of an URL after the '#' sign.", - "markdown": "Reports an unresolved last part of an URL after the `#` sign." + "text": "Reports an unused named pattern ('define') in a RELAX-NG file (XML or Compact Syntax). 'define' elements that are used through an include in another file are ignored.", + "markdown": "Reports an unused named pattern (`define`) in a RELAX-NG file (XML or Compact Syntax). `define` elements that are used through an include in another file are ignored." }, "defaultConfiguration": { "enabled": false, @@ -7760,8 +7760,8 @@ "relationships": [ { "target": { - "id": "HTML", - "index": 15, + "id": "RELAX NG", + "index": 25, "toolComponent": { "name": "QDPY" } @@ -7773,13 +7773,13 @@ ] }, { - "id": "UnusedDefine", + "id": "HtmlUnknownAnchorTarget", "shortDescription": { - "text": "Unused define" + "text": "Unresolved fragment in a link" }, "fullDescription": { - "text": "Reports an unused named pattern ('define') in a RELAX-NG file (XML or Compact Syntax). 'define' elements that are used through an include in another file are ignored.", - "markdown": "Reports an unused named pattern (`define`) in a RELAX-NG file (XML or Compact Syntax). `define` elements that are used through an include in another file are ignored." + "text": "Reports an unresolved last part of an URL after the '#' sign.", + "markdown": "Reports an unresolved last part of an URL after the `#` sign." }, "defaultConfiguration": { "enabled": false, @@ -7791,8 +7791,8 @@ "relationships": [ { "target": { - "id": "RELAX NG", - "index": 25, + "id": "HTML", + "index": 15, "toolComponent": { "name": "QDPY" } @@ -8176,13 +8176,13 @@ ] }, { - "id": "RegExpDuplicateAlternationBranch", + "id": "RegExpRepeatedSpace", "shortDescription": { - "text": "Duplicate branch in alternation" + "text": "Consecutive spaces" }, "fullDescription": { - "text": "Reports duplicate branches in a RegExp alternation. Duplicate branches slow down matching and obscure the intent of the expression. Example: '(alpha|bravo|charlie|alpha)' After the quick-fix is applied: '(alpha|bravo|charlie)' New in 2017.1", - "markdown": "Reports duplicate branches in a RegExp alternation. Duplicate branches slow down matching and obscure the intent of the expression.\n\n**Example:**\n\n\n (alpha|bravo|charlie|alpha)\n\nAfter the quick-fix is applied:\n\n\n (alpha|bravo|charlie)\n\nNew in 2017.1" + "text": "Reports multiple consecutive spaces in a RegExp. Because spaces are not visible by default, it can be hard to see how many spaces are required. The RegExp can be made more clear by replacing the consecutive spaces with a single space and a counted quantifier. Example: '( )' After the quick-fix is applied: '( {5})' New in 2017.1", + "markdown": "Reports multiple consecutive spaces in a RegExp. Because spaces are not visible by default, it can be hard to see how many spaces are required. 
The RegExp can be made more clear by replacing the consecutive spaces with a single space and a counted quantifier.\n\n**Example:**\n\n\n ( )\n\nAfter the quick-fix is applied:\n\n\n ( {5})\n\n\nNew in 2017.1" }, "defaultConfiguration": { "enabled": true, @@ -8207,13 +8207,13 @@ ] }, { - "id": "RegExpRepeatedSpace", + "id": "RegExpDuplicateAlternationBranch", "shortDescription": { - "text": "Consecutive spaces" + "text": "Duplicate branch in alternation" }, "fullDescription": { - "text": "Reports multiple consecutive spaces in a RegExp. Because spaces are not visible by default, it can be hard to see how many spaces are required. The RegExp can be made more clear by replacing the consecutive spaces with a single space and a counted quantifier. Example: '( )' After the quick-fix is applied: '( {5})' New in 2017.1", - "markdown": "Reports multiple consecutive spaces in a RegExp. Because spaces are not visible by default, it can be hard to see how many spaces are required. The RegExp can be made more clear by replacing the consecutive spaces with a single space and a counted quantifier.\n\n**Example:**\n\n\n ( )\n\nAfter the quick-fix is applied:\n\n\n ( {5})\n\n\nNew in 2017.1" + "text": "Reports duplicate branches in a RegExp alternation. Duplicate branches slow down matching and obscure the intent of the expression. Example: '(alpha|bravo|charlie|alpha)' After the quick-fix is applied: '(alpha|bravo|charlie)' New in 2017.1", + "markdown": "Reports duplicate branches in a RegExp alternation. Duplicate branches slow down matching and obscure the intent of the expression.\n\n**Example:**\n\n\n (alpha|bravo|charlie|alpha)\n\nAfter the quick-fix is applied:\n\n\n (alpha|bravo|charlie)\n\nNew in 2017.1" }, "defaultConfiguration": { "enabled": true, @@ -10279,7 +10279,7 @@ }, "invocations": [ { - "exitCode": 255, + "exitCode": 0, "toolExecutionNotifications": [ { "message": { @@ -10288,19 +10288,18 @@ "level": "error" } ], - "exitCodeDescription": "Qodana reached failThreshold", "executionSuccessful": true } ], "language": "en-US", "versionControlProvenance": [ { - "repositoryUri": "ssh://git@github.com/Pale-Blue-Dot-97/Minerva.git", - "revisionId": "9464a885eacac51d5b02afa85adc5b52fb2d276e", - "branch": "optional-dependencies", + "repositoryUri": "https://github.com/Pale-Blue-Dot-97/Minerva.git", + "revisionId": "67d59902b54868233f7e8d0a07b77a2e5adb793c", + "branch": "tg_nb_exp", "properties": { - "repoUrl": "", - "lastAuthorName": "Harry", + "repoUrl": "https://github.com/Pale-Blue-Dot-97/Minerva.git", + "lastAuthorName": "Harry Baker", "vcsType": "Git", "lastAuthorEmail": "hjbaker97@gmail.com" } @@ -10312,33 +10311,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Parameter 'kwargs' value is not used", - "markdown": "Parameter 'kwargs' value is not used" + "text": "Parameter 'out_shape' value is not used", + "markdown": "Parameter 'out_shape' value is not used" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/modelio.py", + "uri": "minerva/logger.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 55, - "startColumn": 5, - "charOffset": 2386, - "charLength": 8, + "startLine": 578, + "startColumn": 9, + "charOffset": 21576, + "charLength": 43, "snippet": { - "text": "**kwargs" + "text": "out_shape: Optional[Tuple[int, ...]] = None" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 53, + "startLine": 576, "startColumn": 1, - "charOffset": 2311, - "charLength": 258, + "charOffset": 21519, + "charLength": 175, "snippet": { - 
"text": " device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], Tensor, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a supervised model using :mod:`torchgeo` datasets." + "text": " batch_size: int,\n n_samples: int,\n out_shape: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n record_int: bool = True," } } }, @@ -10351,7 +10350,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "1a6f436cd859b263a5417ca6e02cf7b72af72f7dc2ebebf2b6097a6c1d15f0c8" + "equalIndicator/v1": "804e8b692591e2448548b29ec0a7c987d578e903f253f6c5bf0a2078f9d39315" }, "baselineState": "unchanged", "properties": { @@ -10366,33 +10365,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Parameter 'kwargs' value is not used", - "markdown": "Parameter 'kwargs' value is not used" + "text": "Parameter 'n_classes' value is not used", + "markdown": "Parameter 'n_classes' value is not used" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/modelio.py", + "uri": "minerva/logger.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 195, - "startColumn": 5, - "charOffset": 7469, - "charLength": 8, + "startLine": 579, + "startColumn": 9, + "charOffset": 21629, + "charLength": 31, "snippet": { - "text": "**kwargs" + "text": "n_classes: Optional[int] = None" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 193, + "startLine": 577, "startColumn": 1, - "charOffset": 7394, - "charLength": 269, + "charOffset": 21544, + "charLength": 186, "snippet": { - "text": " device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], None, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a self-supervised Siamese model using :mod:`torchgeo` datasets." 
+ "text": " n_samples: int,\n out_shape: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n record_int: bool = True,\n record_float: bool = False," } } }, @@ -10405,7 +10404,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "447b8c62912dad134a1662b86dcc158e212e373002fcbc3ced22ea9fc772ee18" + "equalIndicator/v1": "90899b7ee05f6559006df5787bd002283b7c4fcaee4ea7b2bbee470ddbeef5f3" }, "baselineState": "unchanged", "properties": { @@ -10431,9 +10430,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 185, + "startLine": 191, "startColumn": 9, - "charOffset": 6988, + "charOffset": 7383, "charLength": 8, "snippet": { "text": "**params" @@ -10441,9 +10440,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 183, + "startLine": 189, "startColumn": 1, - "charOffset": 6897, + "charOffset": 7292, "charLength": 156, "snippet": { "text": " data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n **params,\n ) -> None:\n super(SPMetrics, self).__init__(" @@ -10459,7 +10458,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "7c37085cf926ae56cfa01a29a96f6350ec650b01affca197d725698ada103448" + "equalIndicator/v1": "258e2a486787dc4f4600b1502edf223b9558273611697f6e425f3597680cd442" }, "baselineState": "unchanged", "properties": { @@ -10485,9 +10484,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 281, + "startLine": 287, "startColumn": 9, - "charOffset": 10481, + "charOffset": 10876, "charLength": 8, "snippet": { "text": "**params" @@ -10495,9 +10494,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 279, + "startLine": 285, "startColumn": 1, - "charOffset": 10395, + "charOffset": 10790, "charLength": 152, "snippet": { "text": " model_type: str = \"segmentation\",\n sample_pairs: bool = False,\n **params,\n ) -> None:\n super(SSLMetrics, self).__init__(" @@ -10513,7 +10512,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "e0f42a5fb5ca6742701a3903de7d07c439196e69d1ed9b45d3a952a65ef7d51e" + "equalIndicator/v1": "a4ea1133ec02f97abaa332011591ac3b3be856a95895f65365c3a33c0510577b" }, "baselineState": "unchanged", "properties": { @@ -10528,33 +10527,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Parameter 'signum' value is not used", - "markdown": "Parameter 'signum' value is not used" + "text": "Local variable 'weights' value is not used", + "markdown": "Local variable 'weights' value is not used" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/utils/runner.py", + "uri": "minerva/models/core.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 320, - "startColumn": 21, - "charOffset": 9932, - "charLength": 6, + "startLine": 398, + "startColumn": 5, + "charOffset": 15001, + "charLength": 7, "snippet": { - "text": "signum" + "text": "weights" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 318, + "startLine": 396, "startColumn": 1, - "charOffset": 9730, - "charLength": 338, + "charOffset": 14870, + "charLength": 261, "snippet": { - "text": "# METHODS\n# =====================================================================================================================\ndef _handle_sigusr1(signum, frame) -> None: # pragma: no cover\n subprocess.Popen( # nosec B602\n f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}'," + "text": " :meth:`~torchvision.models._api.WeightsEnum.get_state_dict` to download the weights (if not already in cache).\n \"\"\"\n weights: Optional[WeightsEnum] = None\n try:\n weights = 
torch.hub.load(\"pytorch/vision\", \"get_weight\", name=weights_name)" } } }, @@ -10567,7 +10566,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "117270d9a225018baaf3c1db0e0848709525c5b6e447bbf4e624f2523d2a0760" + "equalIndicator/v1": "742b4b077d72215ab889e7fb046ebfe0a8ed258939d679564874376be7f673ac" }, "baselineState": "unchanged", "properties": { @@ -10582,8 +10581,8 @@ "kind": "fail", "level": "note", "message": { - "text": "Parameter 'frame' value is not used", - "markdown": "Parameter 'frame' value is not used" + "text": "Parameter 'signum' value is not used", + "markdown": "Parameter 'signum' value is not used" }, "locations": [ { @@ -10593,19 +10592,19 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 320, - "startColumn": 29, - "charOffset": 9940, - "charLength": 5, + "startLine": 326, + "startColumn": 21, + "charOffset": 10327, + "charLength": 6, "snippet": { - "text": "frame" + "text": "signum" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 318, + "startLine": 324, "startColumn": 1, - "charOffset": 9730, + "charOffset": 10125, "charLength": 338, "snippet": { "text": "# METHODS\n# =====================================================================================================================\ndef _handle_sigusr1(signum, frame) -> None: # pragma: no cover\n subprocess.Popen( # nosec B602\n f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}'," @@ -10621,7 +10620,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "bdc531641fc7676bb8d0beaadd85539ee51cd5e0972f02c46a4c245a392863e7" + "equalIndicator/v1": "7a166363bc80e03b0ee8c7bda03d1250179115a50d99b74692bb0cd182e4d47f" }, "baselineState": "unchanged", "properties": { @@ -10636,8 +10635,8 @@ "kind": "fail", "level": "note", "message": { - "text": "Parameter 'signum' value is not used", - "markdown": "Parameter 'signum' value is not used" + "text": "Parameter 'frame' value is not used", + "markdown": "Parameter 'frame' value is not used" }, "locations": [ { @@ -10647,22 +10646,22 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 328, - "startColumn": 21, - "charOffset": 10128, - "charLength": 6, + "startLine": 326, + "startColumn": 29, + "charOffset": 10335, + "charLength": 5, "snippet": { - "text": "signum" + "text": "frame" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 326, + "startLine": 324, "startColumn": 1, - "charOffset": 10106, - "charLength": 75, + "charOffset": 10125, + "charLength": 338, "snippet": { - "text": "\n\ndef _handle_sigterm(signum, frame) -> None: # pragma: no cover\n pass\n" + "text": "# METHODS\n# =====================================================================================================================\ndef _handle_sigusr1(signum, frame) -> None: # pragma: no cover\n subprocess.Popen( # nosec B602\n f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}'," } } }, @@ -10675,7 +10674,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "44e8231d5642fe590a412186e657af1ae83976cc2bdff58a23c2ce32739b2b56" + "equalIndicator/v1": "5e2e6da3f00bdac3978f0771888a0f887b6eb7b057e5f7b1acb844a6ee9620c0" }, "baselineState": "unchanged", "properties": { @@ -10690,8 +10689,8 @@ "kind": "fail", "level": "note", "message": { - "text": "Parameter 'frame' value is not used", - "markdown": "Parameter 'frame' value is not used" + "text": "Parameter 'signum' value is not used", + "markdown": "Parameter 'signum' value is not used" }, "locations": [ { @@ -10701,19 +10700,19 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 328, - "startColumn": 29, - 
"charOffset": 10136, - "charLength": 5, + "startLine": 334, + "startColumn": 21, + "charOffset": 10523, + "charLength": 6, "snippet": { - "text": "frame" + "text": "signum" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 326, + "startLine": 332, "startColumn": 1, - "charOffset": 10106, + "charOffset": 10501, "charLength": 75, "snippet": { "text": "\n\ndef _handle_sigterm(signum, frame) -> None: # pragma: no cover\n pass\n" @@ -10729,115 +10728,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "7c02e413b6013786a3c5c95ea7c26415e4efd0e88f3d3e30e0847dadaf67cc3c" - }, - "baselineState": "unchanged", - "properties": { - "ideaSeverity": "WEAK WARNING", - "tags": [ - "Python" - ] - } - }, - { - "ruleId": "PyUnusedLocalInspection", - "kind": "fail", - "level": "note", - "message": { - "text": "Parameter 'out_shape' value is not used", - "markdown": "Parameter 'out_shape' value is not used" - }, - "locations": [ - { - "physicalLocation": { - "artifactLocation": { - "uri": "minerva/logger.py", - "uriBaseId": "SRCROOT" - }, - "region": { - "startLine": 572, - "startColumn": 9, - "charOffset": 21181, - "charLength": 43, - "snippet": { - "text": "out_shape: Optional[Tuple[int, ...]] = None" - }, - "sourceLanguage": "Python" - }, - "contextRegion": { - "startLine": 570, - "startColumn": 1, - "charOffset": 21124, - "charLength": 175, - "snippet": { - "text": " batch_size: int,\n n_samples: int,\n out_shape: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n record_int: bool = True," - } - } - }, - "logicalLocations": [ - { - "fullyQualifiedName": "project", - "kind": "module" - } - ] - } - ], - "partialFingerprints": { - "equalIndicator/v1": "7d905c643208b59ca50a5417db7df4b9c451fa52174069f83adc14f2337bb834" - }, - "baselineState": "unchanged", - "properties": { - "ideaSeverity": "WEAK WARNING", - "tags": [ - "Python" - ] - } - }, - { - "ruleId": "PyUnusedLocalInspection", - "kind": "fail", - "level": "note", - "message": { - "text": "Parameter 'n_classes' value is not used", - "markdown": "Parameter 'n_classes' value is not used" - }, - "locations": [ - { - "physicalLocation": { - "artifactLocation": { - "uri": "minerva/logger.py", - "uriBaseId": "SRCROOT" - }, - "region": { - "startLine": 573, - "startColumn": 9, - "charOffset": 21234, - "charLength": 31, - "snippet": { - "text": "n_classes: Optional[int] = None" - }, - "sourceLanguage": "Python" - }, - "contextRegion": { - "startLine": 571, - "startColumn": 1, - "charOffset": 21149, - "charLength": 186, - "snippet": { - "text": " n_samples: int,\n out_shape: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n record_int: bool = True,\n record_float: bool = False," - } - } - }, - "logicalLocations": [ - { - "fullyQualifiedName": "project", - "kind": "module" - } - ] - } - ], - "partialFingerprints": { - "equalIndicator/v1": "7d4b7f7cfa6256cd3db43bf7fca128fa86c569eed0eb70a5e1228eebd2906820" + "equalIndicator/v1": "ca10d2c6d9125d6006f8ba8fa10114af79e98bdcf96b0d762fe5b0a205a45e2e" }, "baselineState": "unchanged", "properties": { @@ -10852,33 +10743,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Local variable 'weights' value is not used", - "markdown": "Local variable 'weights' value is not used" + "text": "Parameter 'frame' value is not used", + "markdown": "Parameter 'frame' value is not used" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/core.py", + "uri": "minerva/utils/runner.py", "uriBaseId": "SRCROOT" }, "region": { - 
"startLine": 392, - "startColumn": 5, - "charOffset": 14592, - "charLength": 7, + "startLine": 334, + "startColumn": 29, + "charOffset": 10531, + "charLength": 5, "snippet": { - "text": "weights" + "text": "frame" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 390, + "startLine": 332, "startColumn": 1, - "charOffset": 14461, - "charLength": 261, + "charOffset": 10501, + "charLength": 75, "snippet": { - "text": " :meth:`~torchvision.models._api.WeightsEnum.get_state_dict` to download the weights (if not already in cache).\n \"\"\"\n weights: Optional[WeightsEnum] = None\n try:\n weights = torch.hub.load(\"pytorch/vision\", \"get_weight\", name=weights_name)" + "text": "\n\ndef _handle_sigterm(signum, frame) -> None: # pragma: no cover\n pass\n" } } }, @@ -10891,7 +10782,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "0ff0356abb2a5b783e6987af3ffe63f941c6b1e0dc32969c0d79180b011ca005" + "equalIndicator/v1": "187840981eefe2cf54eedf36ede9cd05779e42d6ba8a3944e45cc4f8656c5762" }, "baselineState": "unchanged", "properties": { @@ -10917,9 +10808,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 406, + "startLine": 412, "startColumn": 5, - "charOffset": 14395, + "charOffset": 14790, "charLength": 11, "snippet": { "text": "module: str" @@ -10927,9 +10818,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 404, + "startLine": 410, "startColumn": 1, - "charOffset": 14359, + "charOffset": 14754, "charLength": 71, "snippet": { "text": "@overload\ndef _optional_import(\n module: str,\n *,\n name: None," @@ -10945,9 +10836,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "64f31e215076caad5303db7a9185c023d2e9cc4e1a21379aace9768bf9e53da2" + "equalIndicator/v1": "7c0923317aecc7f5880637a9ae418f4c2866519d9e80990f7c24406a3dc205c3" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -10971,9 +10862,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 408, + "startLine": 414, "startColumn": 5, - "charOffset": 14419, + "charOffset": 14814, "charLength": 10, "snippet": { "text": "name: None" @@ -10981,9 +10872,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 406, + "startLine": 412, "startColumn": 1, - "charOffset": 14391, + "charOffset": 14786, "charLength": 74, "snippet": { "text": " module: str,\n *,\n name: None,\n package: str,\n) -> ModuleType:" @@ -10999,9 +10890,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "b0a67daef448230c815a175ee2d93d617c5c602f6f147034126b434477466798" + "equalIndicator/v1": "8b7d04e018aac597af6d7c92ed3d0c7a7a38349c4bec56f62f1c223d07cca9c0" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11025,9 +10916,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 409, + "startLine": 415, "startColumn": 5, - "charOffset": 14435, + "charOffset": 14830, "charLength": 12, "snippet": { "text": "package: str" @@ -11035,9 +10926,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 407, + "startLine": 413, "startColumn": 1, - "charOffset": 14408, + "charOffset": 14803, "charLength": 85, "snippet": { "text": " *,\n name: None,\n package: str,\n) -> ModuleType:\n ... 
# pragma: no cover" @@ -11053,9 +10944,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "59653af0569cedb75341e5afd9ac7e99d341caa0f0346b0944c712a5123b2fea" + "equalIndicator/v1": "c113f37cf21f8279ba3fc72afcbcad742997029ec7368149dd86de2ba2512eeb" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11079,9 +10970,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 416, + "startLine": 422, "startColumn": 5, - "charOffset": 14532, + "charOffset": 14927, "charLength": 11, "snippet": { "text": "module: str" @@ -11089,9 +10980,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 414, + "startLine": 420, "startColumn": 1, - "charOffset": 14496, + "charOffset": 14891, "charLength": 70, "snippet": { "text": "@overload\ndef _optional_import(\n module: str,\n *,\n name: str," @@ -11107,9 +10998,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "d8207bbaea1764c9bd7b23fca159ee81c0504b762d2071bca255b8a8d423d647" + "equalIndicator/v1": "beebf11db5983511e0a489293db1e0f689d1b10f1133613a3e669ec8a8ea2e29" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11133,9 +11024,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 418, + "startLine": 424, "startColumn": 5, - "charOffset": 14556, + "charOffset": 14951, "charLength": 9, "snippet": { "text": "name: str" @@ -11143,9 +11034,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 416, + "startLine": 422, "startColumn": 1, - "charOffset": 14528, + "charOffset": 14923, "charLength": 81, "snippet": { "text": " module: str,\n *,\n name: str,\n package: str,\n) -> Callable[..., Any]:" @@ -11161,9 +11052,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "92cb952a17f4777c9d6c8e06128360ebb206f5931ddd007fc1eeec50f336157b" + "equalIndicator/v1": "29d223e39ee3fb8258dfa543973e1263e9b1d708b2e104026179c60c00d34574" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11187,9 +11078,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 419, + "startLine": 425, "startColumn": 5, - "charOffset": 14571, + "charOffset": 14966, "charLength": 12, "snippet": { "text": "package: str" @@ -11197,9 +11088,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 417, + "startLine": 423, "startColumn": 1, - "charOffset": 14545, + "charOffset": 14940, "charLength": 92, "snippet": { "text": " *,\n name: str,\n package: str,\n) -> Callable[..., Any]:\n ... 
# pragma: no cover" @@ -11215,9 +11106,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "689bbde0b1f7d710268cb11c420aa36ab821c2169176e93140a9cac6b51a1e3b" + "equalIndicator/v1": "9bd9c1450d0e190177ade3252dc91b45f4f54f5686cff3e5ba4ec94f97db0c27" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11241,9 +11132,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 426, + "startLine": 432, "startColumn": 5, - "charOffset": 14676, + "charOffset": 15071, "charLength": 11, "snippet": { "text": "module: str" @@ -11251,9 +11142,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 424, + "startLine": 430, "startColumn": 1, - "charOffset": 14640, + "charOffset": 15035, "charLength": 71, "snippet": { "text": "@overload\ndef _optional_import(\n module: str,\n *,\n name: None," @@ -11269,9 +11160,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "b1045b54c3d9e59dc84c2a9e2da20b11ebd44ae75cc4e934400a531f6b39d4d4" + "equalIndicator/v1": "921ab221a65d3992007d402a1b844c6560dfd3b46358102fed1bd13380248381" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11295,9 +11186,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 428, + "startLine": 434, "startColumn": 5, - "charOffset": 14700, + "charOffset": 15095, "charLength": 10, "snippet": { "text": "name: None" @@ -11305,9 +11196,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 426, + "startLine": 432, "startColumn": 1, - "charOffset": 14672, + "charOffset": 15067, "charLength": 75, "snippet": { "text": " module: str,\n *,\n name: None,\n package: None,\n) -> ModuleType:" @@ -11323,9 +11214,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "4b0dc821a53cf83c1957cbf73327a24daf1e3580bae3e99cdd4807007387424a" + "equalIndicator/v1": "0c632520bc6840ee8b1dad16653f38238403308f74305062c6e7d7ce0851f06f" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11349,9 +11240,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 429, + "startLine": 435, "startColumn": 5, - "charOffset": 14716, + "charOffset": 15111, "charLength": 13, "snippet": { "text": "package: None" @@ -11359,9 +11250,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 427, + "startLine": 433, "startColumn": 1, - "charOffset": 14689, + "charOffset": 15084, "charLength": 86, "snippet": { "text": " *,\n name: None,\n package: None,\n) -> ModuleType:\n ... 
# pragma: no cover" @@ -11377,9 +11268,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "b747e55e09e130105965d34ec0c7c274160a5567c8c086b2444ca5c282572bd8" + "equalIndicator/v1": "17c893b3eb6593ee7edeb7840baf8f51f6984a6376459396f5e8bde655440d9f" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11403,9 +11294,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 436, + "startLine": 442, "startColumn": 5, - "charOffset": 14814, + "charOffset": 15209, "charLength": 11, "snippet": { "text": "module: str" @@ -11413,9 +11304,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 434, + "startLine": 440, "startColumn": 1, - "charOffset": 14778, + "charOffset": 15173, "charLength": 70, "snippet": { "text": "@overload\ndef _optional_import(\n module: str,\n *,\n name: str," @@ -11431,9 +11322,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "756bc4a14f1fc5db1feeaa5326ff022f5150e372ac66f2aacf3bef07bfde15f7" + "equalIndicator/v1": "ccbb8c97b07c8c8ad01f12e2c3745f09f18874884858eae0246583ac96861001" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11457,9 +11348,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 438, + "startLine": 444, "startColumn": 5, - "charOffset": 14838, + "charOffset": 15233, "charLength": 9, "snippet": { "text": "name: str" @@ -11467,9 +11358,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 436, + "startLine": 442, "startColumn": 1, - "charOffset": 14810, + "charOffset": 15205, "charLength": 82, "snippet": { "text": " module: str,\n *,\n name: str,\n package: None,\n) -> Callable[..., Any]:" @@ -11485,9 +11376,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "4b41123c644c0d8707d3f2892a60292e77dfbdd63a8daf145395570c3e478635" + "equalIndicator/v1": "0c11d7dc94ccb5bdecf4bd056a894d6c72efc6f021190255c5626b784dbbd65a" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11511,9 +11402,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 439, + "startLine": 445, "startColumn": 5, - "charOffset": 14853, + "charOffset": 15248, "charLength": 13, "snippet": { "text": "package: None" @@ -11521,9 +11412,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 437, + "startLine": 443, "startColumn": 1, - "charOffset": 14827, + "charOffset": 15222, "charLength": 93, "snippet": { "text": " *,\n name: str,\n package: None,\n) -> Callable[..., Any]:\n ... 
# pragma: no cover" @@ -11539,9 +11430,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "0037608f143eb6dab0334fe5bdf36e882b8f6e08f47822b5a84a359002388858" + "equalIndicator/v1": "cd98bf060de44adf0e5a3c9d54fd90777a964a4aa4d9cd6deda87fbc3f5bf489" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11565,9 +11456,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 446, + "startLine": 452, "startColumn": 5, - "charOffset": 14959, + "charOffset": 15354, "charLength": 11, "snippet": { "text": "module: str" @@ -11575,9 +11466,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 444, + "startLine": 450, "startColumn": 1, - "charOffset": 14923, + "charOffset": 15318, "charLength": 70, "snippet": { "text": "@overload\ndef _optional_import(\n module: str,\n *,\n name: str," @@ -11593,9 +11484,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "8106c6351df8c2d800b84f026af36118afca02384d189605751be1c10d5cbd54" + "equalIndicator/v1": "827bb47fba1d6732bfa36878464d47689b61b30ffaf7ff00f0405bca3fc02d08" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11619,9 +11510,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 448, + "startLine": 454, "startColumn": 5, - "charOffset": 14983, + "charOffset": 15378, "charLength": 9, "snippet": { "text": "name: str" @@ -11629,9 +11520,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 446, + "startLine": 452, "startColumn": 1, - "charOffset": 14955, + "charOffset": 15350, "charLength": 91, "snippet": { "text": " module: str,\n *,\n name: str,\n) -> Callable[..., Any]:\n ... # pragma: no cover" @@ -11647,9 +11538,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "4511f811675c4c09ed469382f501a52d318b95616350c7c0232d9b621da56565" + "equalIndicator/v1": "e6c7640b6267a82a5e3465eeed3bc6f81d7ccd0202c007303e9bc42bb8c9d78e" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11673,9 +11564,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 455, + "startLine": 461, "startColumn": 5, - "charOffset": 15085, + "charOffset": 15480, "charLength": 11, "snippet": { "text": "module: str" @@ -11683,9 +11574,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 453, + "startLine": 459, "startColumn": 1, - "charOffset": 15049, + "charOffset": 15444, "charLength": 73, "snippet": { "text": "@overload\ndef _optional_import(\n module: str,\n *,\n package: str," @@ -11701,9 +11592,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "5e673749c3c17bfb58b18d11fd293743456bede1650234480c0caf5274cd6bbc" + "equalIndicator/v1": "5b7dc21593f9e381a4ca3b0056a11c9849240086d260078e0086412fcd0a7e69" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11727,9 +11618,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 457, + "startLine": 463, "startColumn": 5, - "charOffset": 15109, + "charOffset": 15504, "charLength": 12, "snippet": { "text": "package: str" @@ -11737,9 +11628,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 455, + "startLine": 461, "startColumn": 1, - "charOffset": 15081, + "charOffset": 15476, "charLength": 86, "snippet": { "text": " module: str,\n *,\n package: str,\n) -> ModuleType:\n ... 
# pragma: no cover" @@ -11755,9 +11646,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "935513ef6904cc56399db4e1d7852e1d24b071ab7dd42e51758ae613002eccab" + "equalIndicator/v1": "8e35f41bf62f211a8db12b103dd3db040b8e4d24a373379e8eadad12d3842cde" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -11781,9 +11672,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 647, + "startLine": 653, "startColumn": 5, - "charOffset": 20749, + "charOffset": 21145, "charLength": 18, "snippet": { "text": "x: Sequence[float]" @@ -11791,9 +11682,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 645, + "startLine": 651, "startColumn": 1, - "charOffset": 20708, + "charOffset": 21104, "charLength": 102, "snippet": { "text": "@overload\ndef transform_coordinates(\n x: Sequence[float],\n y: Sequence[float],\n src_crs: CRS," @@ -11809,7 +11700,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "38ee83f8d70a171f59ee33f52f749960cbe3b419555909fb2730cb67d30b15e2" + "equalIndicator/v1": "0f3dadef7bfb1414bc4fa02a9b20a9a7aeb375767c8df5d4257c8d1643addd8e" }, "baselineState": "unchanged", "properties": { @@ -11835,9 +11726,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 648, + "startLine": 654, "startColumn": 5, - "charOffset": 20773, + "charOffset": 21169, "charLength": 18, "snippet": { "text": "y: Sequence[float]" @@ -11845,9 +11736,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 646, + "startLine": 652, "startColumn": 1, - "charOffset": 20718, + "charOffset": 21114, "charLength": 118, "snippet": { "text": "def transform_coordinates(\n x: Sequence[float],\n y: Sequence[float],\n src_crs: CRS,\n new_crs: CRS = WGS84," @@ -11863,7 +11754,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "12434d912b492026e4e9be7645a3d44950d145add04f96d7dc808da50c14c6bc" + "equalIndicator/v1": "823b6d6a87dba2145513eb30b5b0d33a1800a90f8e40990f58570a8e1bd0c784" }, "baselineState": "unchanged", "properties": { @@ -11889,9 +11780,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 649, + "startLine": 655, "startColumn": 5, - "charOffset": 20797, + "charOffset": 21193, "charLength": 12, "snippet": { "text": "src_crs: CRS" @@ -11899,9 +11790,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 647, + "startLine": 653, "startColumn": 1, - "charOffset": 20745, + "charOffset": 21141, "charLength": 137, "snippet": { "text": " x: Sequence[float],\n y: Sequence[float],\n src_crs: CRS,\n new_crs: CRS = WGS84,\n) -> Tuple[Sequence[float], Sequence[float]]:" @@ -11917,7 +11808,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "86872c31468cb5a68443410100d4b2690aa23766ec650bdb73c703d701f1f64e" + "equalIndicator/v1": "3f9445736a8e76ca79dcfdcd1d5aa68d78ca876c062fede0bf3de62b9571abae" }, "baselineState": "unchanged", "properties": { @@ -11943,9 +11834,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 650, + "startLine": 656, "startColumn": 5, - "charOffset": 20815, + "charOffset": 21211, "charLength": 20, "snippet": { "text": "new_crs: CRS = WGS84" @@ -11953,9 +11844,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 648, + "startLine": 654, "startColumn": 1, - "charOffset": 20769, + "charOffset": 21165, "charLength": 141, "snippet": { "text": " y: Sequence[float],\n src_crs: CRS,\n new_crs: CRS = WGS84,\n) -> Tuple[Sequence[float], Sequence[float]]:\n ... 
# pragma: no cover" @@ -11971,7 +11862,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "34bbdabdba9b865a8ebd3b7e10d0ba92ce324d4d4f79d03995913c0c71972225" + "equalIndicator/v1": "4fe06f9281ce4b4a57cddff4774e74a608c7b0faf1ab368b9bd54cb8a2b9d7eb" }, "baselineState": "unchanged", "properties": { @@ -11997,9 +11888,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 657, + "startLine": 663, "startColumn": 5, - "charOffset": 20954, + "charOffset": 21350, "charLength": 18, "snippet": { "text": "x: Sequence[float]" @@ -12007,9 +11898,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 655, + "startLine": 661, "startColumn": 1, - "charOffset": 20913, + "charOffset": 21309, "charLength": 92, "snippet": { "text": "@overload\ndef transform_coordinates(\n x: Sequence[float],\n y: float,\n src_crs: CRS," @@ -12025,7 +11916,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "13f8573891d6e20031a7d665d5eb092413c236afe93a8af406684370630da937" + "equalIndicator/v1": "5da5ce035495de1cd2094e8c95897da2b33b3427495ae3474d9e3c738e472051" }, "baselineState": "unchanged", "properties": { @@ -12051,9 +11942,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 658, + "startLine": 664, "startColumn": 5, - "charOffset": 20978, + "charOffset": 21374, "charLength": 8, "snippet": { "text": "y: float" @@ -12061,9 +11952,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 656, + "startLine": 662, "startColumn": 1, - "charOffset": 20923, + "charOffset": 21319, "charLength": 108, "snippet": { "text": "def transform_coordinates(\n x: Sequence[float],\n y: float,\n src_crs: CRS,\n new_crs: CRS = WGS84," @@ -12079,7 +11970,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "23033e7ac92b63a3eb98a44a3e2fe5df64b6312646067d3f9a6c45fdd7379814" + "equalIndicator/v1": "f50dbfdfff32e7fd7ac13830d8158518354c68561197934137eab431d5b1e7f0" }, "baselineState": "unchanged", "properties": { @@ -12105,9 +11996,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 659, + "startLine": 665, "startColumn": 5, - "charOffset": 20992, + "charOffset": 21388, "charLength": 12, "snippet": { "text": "src_crs: CRS" @@ -12115,9 +12006,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 657, + "startLine": 663, "startColumn": 1, - "charOffset": 20950, + "charOffset": 21346, "charLength": 127, "snippet": { "text": " x: Sequence[float],\n y: float,\n src_crs: CRS,\n new_crs: CRS = WGS84,\n) -> Tuple[Sequence[float], Sequence[float]]:" @@ -12133,7 +12024,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "39cae2ee19f9188fb793825850ffb790ab7fe99c4c4b28ab627b289a559f94ec" + "equalIndicator/v1": "5fb34a8c323e9598cf367a8cf7962e5a6000f0e7979249738f8b172427abc462" }, "baselineState": "unchanged", "properties": { @@ -12159,9 +12050,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 660, + "startLine": 666, "startColumn": 5, - "charOffset": 21010, + "charOffset": 21406, "charLength": 20, "snippet": { "text": "new_crs: CRS = WGS84" @@ -12169,9 +12060,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 658, + "startLine": 664, "startColumn": 1, - "charOffset": 20974, + "charOffset": 21370, "charLength": 131, "snippet": { "text": " y: float,\n src_crs: CRS,\n new_crs: CRS = WGS84,\n) -> Tuple[Sequence[float], Sequence[float]]:\n ... 
# pragma: no cover" @@ -12187,7 +12078,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "6fbc9db7009212357c2e23b003ce2e21ff0087457e5b3b6d52a5faaa3d9d45a0" + "equalIndicator/v1": "f8b7c12f0cb51c238ad3c7037a2b27754799811214cc8066dac9fe43f3b62e3f" }, "baselineState": "unchanged", "properties": { @@ -12213,9 +12104,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 667, + "startLine": 673, "startColumn": 5, - "charOffset": 21149, + "charOffset": 21545, "charLength": 8, "snippet": { "text": "x: float" @@ -12223,9 +12114,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 665, + "startLine": 671, "startColumn": 1, - "charOffset": 21108, + "charOffset": 21504, "charLength": 92, "snippet": { "text": "@overload\ndef transform_coordinates(\n x: float,\n y: Sequence[float],\n src_crs: CRS," @@ -12241,7 +12132,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "d93635509db05661161de4f18ee52fd55afdfb2e08e1465c5b6680f4a39c367b" + "equalIndicator/v1": "c2733a760ad5d1e44d7f6482628248f404337010530c85741132e30234997251" }, "baselineState": "unchanged", "properties": { @@ -12267,9 +12158,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 668, + "startLine": 674, "startColumn": 5, - "charOffset": 21163, + "charOffset": 21559, "charLength": 18, "snippet": { "text": "y: Sequence[float]" @@ -12277,9 +12168,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 666, + "startLine": 672, "startColumn": 1, - "charOffset": 21118, + "charOffset": 21514, "charLength": 108, "snippet": { "text": "def transform_coordinates(\n x: float,\n y: Sequence[float],\n src_crs: CRS,\n new_crs: CRS = WGS84," @@ -12295,7 +12186,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "9c59c42e7b3a32ba1eea9b2f26520f9d99524282cd76663229fbd04b27499499" + "equalIndicator/v1": "34679cec2f9d28bce509257e9d701c6c19322c04b4b23da1e11ba884a8457e83" }, "baselineState": "unchanged", "properties": { @@ -12321,9 +12212,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 669, + "startLine": 675, "startColumn": 5, - "charOffset": 21187, + "charOffset": 21583, "charLength": 12, "snippet": { "text": "src_crs: CRS" @@ -12331,9 +12222,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 667, + "startLine": 673, "startColumn": 1, - "charOffset": 21145, + "charOffset": 21541, "charLength": 127, "snippet": { "text": " x: float,\n y: Sequence[float],\n src_crs: CRS,\n new_crs: CRS = WGS84,\n) -> Tuple[Sequence[float], Sequence[float]]:" @@ -12349,7 +12240,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "8ad63bcbcbbc432eafd7fa5710811a9e8ecb06c5d960d8e9b7025ba8c3519ca6" + "equalIndicator/v1": "6cef23966772a3a911f93a29da22fda9021a2d02212acc8ec4608f8e1c3757da" }, "baselineState": "unchanged", "properties": { @@ -12375,9 +12266,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 670, + "startLine": 676, "startColumn": 5, - "charOffset": 21205, + "charOffset": 21601, "charLength": 20, "snippet": { "text": "new_crs: CRS = WGS84" @@ -12385,9 +12276,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 668, + "startLine": 674, "startColumn": 1, - "charOffset": 21159, + "charOffset": 21555, "charLength": 141, "snippet": { "text": " y: Sequence[float],\n src_crs: CRS,\n new_crs: CRS = WGS84,\n) -> Tuple[Sequence[float], Sequence[float]]:\n ... 
# pragma: no cover" @@ -12403,7 +12294,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "4afbb49c261e4c3d1a2fe3eb281f41dc584d8e5e7ee471898d83bce8134b87f1" + "equalIndicator/v1": "253bb0c558ecdb78641b98abf56ce9b6fc08564f4cfc06cc537e6a5d6f148763" }, "baselineState": "unchanged", "properties": { @@ -12429,9 +12320,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 677, + "startLine": 683, "startColumn": 5, - "charOffset": 21344, + "charOffset": 21740, "charLength": 8, "snippet": { "text": "x: float" @@ -12439,9 +12330,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 675, + "startLine": 681, "startColumn": 1, - "charOffset": 21303, + "charOffset": 21699, "charLength": 149, "snippet": { "text": "@overload\ndef transform_coordinates(\n x: float, y: float, src_crs: CRS, new_crs: CRS = WGS84\n) -> Tuple[float, float]:\n ... # pragma: no cover" @@ -12457,7 +12348,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "283fa23dcb3fc5e827a28b06c0f430cc9dbbe4ce3a07d168e52fc303d08531c6" + "equalIndicator/v1": "699f1f249addc596b6641d3df3ae70dc76bc988b438effa84787fccd14a57878" }, "baselineState": "unchanged", "properties": { @@ -12483,9 +12374,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 677, + "startLine": 683, "startColumn": 15, - "charOffset": 21354, + "charOffset": 21750, "charLength": 8, "snippet": { "text": "y: float" @@ -12493,9 +12384,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 675, + "startLine": 681, "startColumn": 1, - "charOffset": 21303, + "charOffset": 21699, "charLength": 149, "snippet": { "text": "@overload\ndef transform_coordinates(\n x: float, y: float, src_crs: CRS, new_crs: CRS = WGS84\n) -> Tuple[float, float]:\n ... # pragma: no cover" @@ -12511,7 +12402,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "25d2f55bc463e256292546c24fb4b5d018fc3911b4ce7507323b72919b81054f" + "equalIndicator/v1": "1e314f0928838c740e7ee6bfbc6c9da2288ff8d33f3c59a6d1692604c49db761" }, "baselineState": "unchanged", "properties": { @@ -12537,9 +12428,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 677, + "startLine": 683, "startColumn": 25, - "charOffset": 21364, + "charOffset": 21760, "charLength": 12, "snippet": { "text": "src_crs: CRS" @@ -12547,9 +12438,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 675, + "startLine": 681, "startColumn": 1, - "charOffset": 21303, + "charOffset": 21699, "charLength": 149, "snippet": { "text": "@overload\ndef transform_coordinates(\n x: float, y: float, src_crs: CRS, new_crs: CRS = WGS84\n) -> Tuple[float, float]:\n ... # pragma: no cover" @@ -12565,7 +12456,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "81be54281bedc4aedd2484f5edfe9bd71b1b253517d35e85740fa1459cf7e612" + "equalIndicator/v1": "a2c95629833811f23e716c3361aa43de8593de8e994c5f06357811aa61850a3a" }, "baselineState": "unchanged", "properties": { @@ -12591,9 +12482,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 677, + "startLine": 683, "startColumn": 39, - "charOffset": 21378, + "charOffset": 21774, "charLength": 20, "snippet": { "text": "new_crs: CRS = WGS84" @@ -12601,9 +12492,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 675, + "startLine": 681, "startColumn": 1, - "charOffset": 21303, + "charOffset": 21699, "charLength": 149, "snippet": { "text": "@overload\ndef transform_coordinates(\n x: float, y: float, src_crs: CRS, new_crs: CRS = WGS84\n) -> Tuple[float, float]:\n ... 
# pragma: no cover" @@ -12619,7 +12510,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "fe2b18652cccf3d711e50204319af335723419d07636f690bb13ab74fcb0e0dd" + "equalIndicator/v1": "d2de157b54bf676d93106ad9a49d6a4c8f27b67c7e317b607d87841cd858c7e2" }, "baselineState": "unchanged", "properties": { @@ -12645,9 +12536,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1107, + "startLine": 1113, "startColumn": 5, - "charOffset": 37064, + "charOffset": 37460, "charLength": 24, "snippet": { "text": "array: NDArray[Any, Int]" @@ -12655,9 +12546,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1105, + "startLine": 1111, "startColumn": 1, - "charOffset": 37030, + "charOffset": 37426, "charLength": 134, "snippet": { "text": "@overload\ndef mask_transform(\n array: NDArray[Any, Int], matrix: Dict[int, int]\n) -> NDArray[Any, Int]:\n ... # pragma: no cover" @@ -12673,7 +12564,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "fa531a78d26b58c286d0cb966d99afc160a246f05d410a36c1e4cbe159dbdbde" + "equalIndicator/v1": "eca453229b681f82e9718144e2ca925652086f032c2395052c404846e402f21b" }, "baselineState": "unchanged", "properties": { @@ -12699,9 +12590,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1107, + "startLine": 1113, "startColumn": 31, - "charOffset": 37090, + "charOffset": 37486, "charLength": 22, "snippet": { "text": "matrix: Dict[int, int]" @@ -12709,9 +12600,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1105, + "startLine": 1111, "startColumn": 1, - "charOffset": 37030, + "charOffset": 37426, "charLength": 134, "snippet": { "text": "@overload\ndef mask_transform(\n array: NDArray[Any, Int], matrix: Dict[int, int]\n) -> NDArray[Any, Int]:\n ... # pragma: no cover" @@ -12727,7 +12618,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "07647cea09c5d2a68c56c9e4f06544071584c886990188970e8d4b85d2eb00fc" + "equalIndicator/v1": "4c0ae32aa38894f1a62df12ad12f4c87c2118c698530a3ba2e2b6aa88ac6a690" }, "baselineState": "unchanged", "properties": { @@ -12753,9 +12644,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1113, + "startLine": 1119, "startColumn": 20, - "charOffset": 37196, + "charOffset": 37592, "charLength": 17, "snippet": { "text": "array: LongTensor" @@ -12763,9 +12654,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1111, + "startLine": 1117, "startColumn": 1, - "charOffset": 37166, + "charOffset": 37562, "charLength": 116, "snippet": { "text": "\n@overload\ndef mask_transform(array: LongTensor, matrix: Dict[int, int]) -> LongTensor:\n ... # pragma: no cover\n" @@ -12781,7 +12672,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "134b208713e34f952e02aae368f05a668fb259578fb178f1b21fb00de6d5e395" + "equalIndicator/v1": "5c003eb7b54acbe7cc0b18b322c53e92101a8b736b4c2c4796fe3d35f6e03bbf" }, "baselineState": "unchanged", "properties": { @@ -12807,9 +12698,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1113, + "startLine": 1119, "startColumn": 39, - "charOffset": 37215, + "charOffset": 37611, "charLength": 22, "snippet": { "text": "matrix: Dict[int, int]" @@ -12817,9 +12708,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1111, + "startLine": 1117, "startColumn": 1, - "charOffset": 37166, + "charOffset": 37562, "charLength": 116, "snippet": { "text": "\n@overload\ndef mask_transform(array: LongTensor, matrix: Dict[int, int]) -> LongTensor:\n ... 
# pragma: no cover\n" @@ -12835,7 +12726,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "fc4720f6f68bd3877becbfe657acde5b380c42816c7fa2ee64ccd8d57b0d5539" + "equalIndicator/v1": "d086584adff40cf3be091b72722e5c23e839ab1089e59a9d12301f65b1b168dc" }, "baselineState": "unchanged", "properties": { @@ -12846,37 +12737,37 @@ } }, { - "ruleId": "PyShadowingNamesInspection", + "ruleId": "PyUnusedLocalInspection", "kind": "fail", "level": "note", "message": { - "text": "Shadows name 'name' from outer scope", - "markdown": "Shadows name 'name' from outer scope" + "text": "Parameter 'kwargs' value is not used", + "markdown": "Parameter 'kwargs' value is not used" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/datasets.py", + "uri": "minerva/modelio.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 637, - "startColumn": 13, - "charOffset": 24547, - "charLength": 4, + "startLine": 61, + "startColumn": 5, + "charOffset": 2781, + "charLength": 8, "snippet": { - "text": "name" + "text": "**kwargs" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 635, + "startLine": 59, "startColumn": 1, - "charOffset": 24521, - "charLength": 175, + "charOffset": 2706, + "charLength": 258, "snippet": { - "text": "\n Example:\n >>> name = \"RandomResizedCrop\"\n >>> params = {\"module\": \"torchvision.transforms\", \"size\": 128}\n >>> transform = get_transform(name, params)" + "text": " device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], Tensor, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a supervised model using :mod:`torchgeo` datasets." } } }, @@ -12889,7 +12780,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "1c6b35a417373ae9677357891f726244d850aba9f404c2d4f2c7017d07be1fd0" + "equalIndicator/v1": "07efef422377d6204abdd4f61717e6fdb3cd71e60808a14a968e14bad1a45f81" }, "baselineState": "unchanged", "properties": { @@ -12900,37 +12791,37 @@ } }, { - "ruleId": "PyShadowingNamesInspection", + "ruleId": "PyUnusedLocalInspection", "kind": "fail", "level": "note", "message": { - "text": "Shadows name 'params' from outer scope", - "markdown": "Shadows name 'params' from outer scope" + "text": "Parameter 'kwargs' value is not used", + "markdown": "Parameter 'kwargs' value is not used" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/datasets.py", + "uri": "minerva/modelio.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 638, - "startColumn": 13, - "charOffset": 24586, - "charLength": 6, + "startLine": 201, + "startColumn": 5, + "charOffset": 7864, + "charLength": 8, "snippet": { - "text": "params" + "text": "**kwargs" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 636, + "startLine": 199, "startColumn": 1, - "charOffset": 24522, - "charLength": 175, + "charOffset": 7789, + "charLength": 269, "snippet": { - "text": " Example:\n >>> name = \"RandomResizedCrop\"\n >>> params = {\"module\": \"torchvision.transforms\", \"size\": 128}\n >>> transform = get_transform(name, params)\n" + "text": " device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], None, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a self-supervised Siamese model using :mod:`torchgeo` datasets." 
} } }, @@ -12943,7 +12834,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "afe55ea73db7dd2a2b4c2c31a3695cf24a4ff3fe3ef1e562a5304e433b4f2557" + "equalIndicator/v1": "e261bb1ac388f3c92611c4fba3eaeaa02a5a80ccbce148d50cb825eb2a7d70a3" }, "baselineState": "unchanged", "properties": { @@ -12958,8 +12849,116 @@ "kind": "fail", "level": "note", "message": { - "text": "Shadows name 'transform' from outer scope", - "markdown": "Shadows name 'transform' from outer scope" + "text": "Shadows name 'err' from outer scope", + "markdown": "Shadows name 'err' from outer scope" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "minerva/trainer.py", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 401, + "startColumn": 44, + "charOffset": 20832, + "charLength": 3, + "snippet": { + "text": "err" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 399, + "startColumn": 1, + "charOffset": 20675, + "charLength": 278, + "snippet": { + "text": " input_to_model=torch.rand(*input_size, device=self.device),\n )\n except RuntimeError as err: # pragma: no cover\n print(err)\n print(\"ABORT adding graph to writer\")" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + ], + "partialFingerprints": { + "equalIndicator/v1": "8b01840707a1737ff454f5839c5da7869f0758910a4d6bfe0457a55918904317" + }, + "baselineState": "unchanged", + "properties": { + "ideaSeverity": "WEAK WARNING", + "tags": [ + "Python" + ] + } + }, + { + "ruleId": "PyShadowingNamesInspection", + "kind": "fail", + "level": "note", + "message": { + "text": "Shadows name 'err' from outer scope", + "markdown": "Shadows name 'err' from outer scope" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "minerva/trainer.py", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 1173, + "startColumn": 46, + "charOffset": 53004, + "charLength": 3, + "snippet": { + "text": "err" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 1171, + "startColumn": 1, + "charOffset": 52894, + "charLength": 233, + "snippet": { + "text": " metrics_df.to_csv(f\"{self.exp_fn}_metrics.csv\")\n\n except (ValueError, KeyError) as err: # pragma: no cover\n self.print(err)\n self.print(\"\\n*ERROR* in saving metrics to file.\")" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + ], + "partialFingerprints": { + "equalIndicator/v1": "bfdbd60a5ff397c748d9c98aa7ba3bafca0eff23157729ffb567450aeb6c92ba" + }, + "baselineState": "unchanged", + "properties": { + "ideaSeverity": "WEAK WARNING", + "tags": [ + "Python" + ] + } + }, + { + "ruleId": "PyShadowingNamesInspection", + "kind": "fail", + "level": "note", + "message": { + "text": "Shadows name 'name' from outer scope", + "markdown": "Shadows name 'name' from outer scope" }, "locations": [ { @@ -12969,22 +12968,22 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 639, + "startLine": 645, "startColumn": 13, - "charOffset": 24657, - "charLength": 9, + "charOffset": 25079, + "charLength": 4, "snippet": { - "text": "transform" + "text": "name" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 637, + "startLine": 643, "startColumn": 1, - "charOffset": 24535, - "charLength": 174, + "charOffset": 25053, + "charLength": 175, "snippet": { - "text": " >>> name = \"RandomResizedCrop\"\n >>> params = {\"module\": \"torchvision.transforms\", \"size\": 128}\n >>> transform = get_transform(name, 
params)\n\n Raises:" + "text": "\n Example:\n >>> name = \"RandomResizedCrop\"\n >>> params = {\"module\": \"torchvision.transforms\", \"size\": 128}\n >>> transform = get_transform(name, params)" } } }, @@ -12997,7 +12996,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "bd7c2c13e7b62df01b6e3b19fdd191e057c0a381c4a857583f42eb1b17e07d72" + "equalIndicator/v1": "2e49d648deec46f5b1482640315689f55c66b96c569e8362e4418ed3d7ac4da9" }, "baselineState": "unchanged", "properties": { @@ -13012,8 +13011,8 @@ "kind": "fail", "level": "note", "message": { - "text": "Shadows name 'transform_params' from outer scope", - "markdown": "Shadows name 'transform_params' from outer scope" + "text": "Shadows name 'params' from outer scope", + "markdown": "Shadows name 'params' from outer scope" }, "locations": [ { @@ -13023,22 +13022,22 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 696, + "startLine": 646, "startColumn": 13, - "charOffset": 26638, - "charLength": 16, + "charOffset": 25118, + "charLength": 6, "snippet": { - "text": "transform_params" + "text": "params" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 694, + "startLine": 644, "startColumn": 1, - "charOffset": 26612, - "charLength": 213, + "charOffset": 25054, + "charLength": 175, "snippet": { - "text": "\n Example:\n >>> transform_params = {\n >>> \"CenterCrop\": {\"module\": \"torchvision.transforms\", \"size\": 128},\n >>> \"RandomHorizontalFlip\": {\"module\": \"torchvision.transforms\", \"p\": 0.7}" + "text": " Example:\n >>> name = \"RandomResizedCrop\"\n >>> params = {\"module\": \"torchvision.transforms\", \"size\": 128}\n >>> transform = get_transform(name, params)\n" } } }, @@ -13051,7 +13050,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "10d1765aeff9477a5eec9a44fcdacd2ffa0a6ca0bbd5e2b62518087cc0d00f50" + "equalIndicator/v1": "5851b5f7198a914462b9ba6967858ecf500e2c4495c19dd3a983a66308a0c403" }, "baselineState": "unchanged", "properties": { @@ -13066,33 +13065,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Shadows name 'err' from outer scope", - "markdown": "Shadows name 'err' from outer scope" + "text": "Shadows name 'transform' from outer scope", + "markdown": "Shadows name 'transform' from outer scope" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/trainer.py", + "uri": "minerva/datasets.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 395, - "startColumn": 44, - "charOffset": 20437, - "charLength": 3, + "startLine": 647, + "startColumn": 13, + "charOffset": 25189, + "charLength": 9, "snippet": { - "text": "err" + "text": "transform" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 393, + "startLine": 645, "startColumn": 1, - "charOffset": 20280, - "charLength": 278, + "charOffset": 25067, + "charLength": 174, "snippet": { - "text": " input_to_model=torch.rand(*input_size, device=self.device),\n )\n except RuntimeError as err: # pragma: no cover\n print(err)\n print(\"ABORT adding graph to writer\")" + "text": " >>> name = \"RandomResizedCrop\"\n >>> params = {\"module\": \"torchvision.transforms\", \"size\": 128}\n >>> transform = get_transform(name, params)\n\n Raises:" } } }, @@ -13105,9 +13104,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "800eccea4cb0cf672e8c12b20f947abfa14fdf5d2e08ded379a1d50159b416d1" + "equalIndicator/v1": "3c46c9e1cd78bc93be86c8d8fa7853f31c4b9de47742a6f7d4fc26369235d0f4" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK 
WARNING", "tags": [ @@ -13120,33 +13119,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Shadows name 'err' from outer scope", - "markdown": "Shadows name 'err' from outer scope" + "text": "Shadows name 'transform_params' from outer scope", + "markdown": "Shadows name 'transform_params' from outer scope" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/trainer.py", + "uri": "minerva/datasets.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1159, - "startColumn": 46, - "charOffset": 52445, - "charLength": 3, + "startLine": 704, + "startColumn": 13, + "charOffset": 27170, + "charLength": 16, "snippet": { - "text": "err" + "text": "transform_params" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1157, + "startLine": 702, "startColumn": 1, - "charOffset": 52335, - "charLength": 233, + "charOffset": 27144, + "charLength": 213, "snippet": { - "text": " metrics_df.to_csv(f\"{self.exp_fn}_metrics.csv\")\n\n except (ValueError, KeyError) as err: # pragma: no cover\n self.print(err)\n self.print(\"\\n*ERROR* in saving metrics to file.\")" + "text": "\n Example:\n >>> transform_params = {\n >>> \"CenterCrop\": {\"module\": \"torchvision.transforms\", \"size\": 128},\n >>> \"RandomHorizontalFlip\": {\"module\": \"torchvision.transforms\", \"p\": 0.7}" } } }, @@ -13159,9 +13158,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "20687f3a436964ee7a31106fc2fcf903e71828e43b4220381a81a4d322254ddb" + "equalIndicator/v1": "c0a848fa7b4cea71e2f18ea47198ac084b481dee2ad8031ba6401fcc322fa9e4" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -13239,9 +13238,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 51, + "startLine": 57, "startColumn": 31, - "charOffset": 1950, + "charOffset": 2345, "charLength": 1, "snippet": { "text": "F" @@ -13249,9 +13248,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 49, + "startLine": 55, "startColumn": 1, - "charOffset": 1906, + "charOffset": 2301, "charLength": 100, "snippet": { "text": "\nimport torch\nimport torch.nn.functional as F\nimport torch.nn.modules as nn\nfrom torch import Tensor" @@ -13267,7 +13266,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "b06945ae3d9361e047e380960e53aff91d428f44980d1d4a5ca4cac5edd71667" + "equalIndicator/v1": "ec51aaf46c727d9bcdb2c045ae3fd6f761fcf57be86d4f9b733cf668113f0c57" }, "baselineState": "unchanged", "properties": { @@ -13282,33 +13281,33 @@ "kind": "fail", "level": "note", "message": { - "text": "CamelCase variable imported as constant", - "markdown": "CamelCase variable imported as constant" + "text": "Lowercase variable imported as non-lowercase", + "markdown": "Lowercase variable imported as non-lowercase" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/trainer.py", + "uri": "minerva/utils/utils.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 60, - "startColumn": 58, - "charOffset": 2309, - "charLength": 3, + "startLine": 165, + "startColumn": 36, + "charOffset": 5895, + "charLength": 1, "snippet": { - "text": "DDP" + "text": "F" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 58, + "startLine": 163, "startColumn": 1, - "charOffset": 2191, - "charLength": 162, + "charOffset": 5793, + "charLength": 171, "snippet": { - "text": "from torch import Tensor\nfrom torch.nn.modules import Module\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import 
DataLoader\n" + "text": "from tabulate import tabulate\nfrom torch import LongTensor, Tensor\nfrom torch.nn import functional as F\nfrom torch.nn.modules import Module\nfrom torch.types import _device" } } }, @@ -13321,7 +13320,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "be2e2ed7ebaa973e4701db47ab86babe28275cb66dee8346a0c833cbccf31f5f" + "equalIndicator/v1": "c4199c675e3d62d912cf5f2ae27acbc90f4a3b3ca4b066509391bdfc90956885" }, "baselineState": "unchanged", "properties": { @@ -13336,33 +13335,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Lowercase variable imported as non-lowercase", - "markdown": "Lowercase variable imported as non-lowercase" + "text": "CamelCase variable imported as constant", + "markdown": "CamelCase variable imported as constant" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/utils/utils.py", + "uri": "minerva/trainer.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 159, - "startColumn": 36, - "charOffset": 5500, - "charLength": 1, + "startLine": 66, + "startColumn": 58, + "charOffset": 2704, + "charLength": 3, "snippet": { - "text": "F" + "text": "DDP" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 157, + "startLine": 64, "startColumn": 1, - "charOffset": 5398, - "charLength": 171, + "charOffset": 2586, + "charLength": 162, "snippet": { - "text": "from tabulate import tabulate\nfrom torch import LongTensor, Tensor\nfrom torch.nn import functional as F\nfrom torch.nn.modules import Module\nfrom torch.types import _device" + "text": "from torch import Tensor\nfrom torch.nn.modules import Module\nfrom torch.nn.parallel import DistributedDataParallel as DDP\nfrom torch.utils.data import DataLoader\n" } } }, @@ -13375,7 +13374,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "fcbb75c61af1244cb5a5d34ccf63e9500d0c6fb9a5520e92371603c500c09d09" + "equalIndicator/v1": "23e93fc3736112f951b896beb7c48b3bf52be8b6591af96b54f355fe0e85d020" }, "baselineState": "unchanged", "properties": { @@ -13401,9 +13400,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 521, + "startLine": 527, "startColumn": 5, - "charOffset": 17040, + "charOffset": 17436, "charLength": 6, "snippet": { "text": "except" @@ -13411,9 +13410,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 519, + "startLine": 525, "startColumn": 1, - "charOffset": 16952, + "charOffset": 17348, "charLength": 160, "snippet": { "text": " if \"VSCODE_PID\" in os.environ: # pragma: no cover\n return False\n except: # noqa: E722\n return False\n else: # pragma: no cover" @@ -13429,7 +13428,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "86a04c2d2f1caa67f6daf4d3435100ae3c9441e9008a095af3d5d9f3c4eaa115" + "equalIndicator/v1": "46c92d1bcb92b868811eb7960fcf4fe989134cd4b1fa98b7a636a2f772ecf1e3" }, "baselineState": "unchanged", "properties": { @@ -13455,9 +13454,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 91, + "startLine": 97, "startColumn": 30, - "charOffset": 4224, + "charOffset": 4619, "charLength": 15, "snippet": { "text": "\"32\", \"16\", \"8\"" @@ -13465,9 +13464,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 89, + "startLine": 95, "startColumn": 1, - "charOffset": 4158, + "charOffset": 4553, "charLength": 108, "snippet": { "text": "\n backbone_name: str = \"ResNet18\"\n decoder_variant: Literal[\"32\", \"16\", \"8\"] = \"32\"\n\n def __init__(" @@ -13483,7 +13482,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": 
"85d700f887713334dab651c626ecab8990034172e1a7ec4947dfb75d900cd768" + "equalIndicator/v1": "147dae8164421c2995478164f099e19d670cfdba40275361166969dc391fd656" }, "baselineState": "unchanged", "properties": { @@ -13509,9 +13508,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 196, + "startLine": 202, "startColumn": 26, - "charOffset": 9384, + "charOffset": 9779, "charLength": 15, "snippet": { "text": "\"32\", \"16\", \"8\"" @@ -13519,9 +13518,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 194, + "startLine": 200, "startColumn": 1, - "charOffset": 9299, + "charOffset": 9694, "charLength": 179, "snippet": { "text": " in_channel: int = 512,\n n_classes: int = 21,\n variant: Literal[\"32\", \"16\", \"8\"] = \"32\",\n ) -> None:\n super(DCN, self).__init__(n_classes=n_classes)" @@ -13537,7 +13536,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "497fb9fd0f078b19cfcda116783f462aae96e8d28e5225cc4faa4361b0a5a42e" + "equalIndicator/v1": "f5132c1065769c8c99744c4765a59334f3a95036fe0f3264176a5daa4b410047" }, "baselineState": "unchanged", "properties": { @@ -13563,9 +13562,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 199, + "startLine": 205, "startColumn": 31, - "charOffset": 9509, + "charOffset": 9904, "charLength": 15, "snippet": { "text": "\"32\", \"16\", \"8\"" @@ -13573,9 +13572,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 197, + "startLine": 203, "startColumn": 1, - "charOffset": 9409, + "charOffset": 9804, "charLength": 170, "snippet": { "text": " ) -> None:\n super(DCN, self).__init__(n_classes=n_classes)\n self.variant: Literal[\"32\", \"16\", \"8\"] = variant\n\n assert type(self.n_classes) is int" @@ -13591,7 +13590,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "97a29c4c7fd021b569720dfca53574f0b17ac295b459cc18a0569bc505db51ce" + "equalIndicator/v1": "fe143f54fbd52ab970e566c2201c1b77f8aa5a52375400e4d602117e1d0fb61f" }, "baselineState": "unchanged", "properties": { @@ -13617,9 +13616,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 422, + "startLine": 429, "startColumn": 41, - "charOffset": 15637, + "charOffset": 16048, "charLength": 8, "snippet": { "text": "\"params\"" @@ -13627,9 +13626,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 420, + "startLine": 427, "startColumn": 1, - "charOffset": 15528, + "charOffset": 15939, "charLength": 198, "snippet": { "text": " dataset_class: Callable[..., GeoDataset],\n root: str,\n subdataset_params: Dict[Literal[\"params\"], Dict[str, Any]],\n _transformations: Optional[Any],\n ) -> GeoDataset:" @@ -13645,7 +13644,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "c98c2de71363bbccfe76d5dba5a60315b3e4cb87c8f26ea33bfbb48427188970" + "equalIndicator/v1": "5fdd9163613255f9b6d06424e0f15af9f7334dc499904b2d1ea19d79bb84e6e8" }, "baselineState": "unchanged", "properties": { @@ -13671,9 +13670,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 684, + "startLine": 692, "startColumn": 53, - "charOffset": 26061, + "charOffset": 26593, "charLength": 5, "snippet": { "text": "False" @@ -13681,9 +13680,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 682, + "startLine": 690, "startColumn": 1, - "charOffset": 25982, + "charOffset": 26514, "charLength": 217, "snippet": { "text": "\ndef make_transformations(\n transform_params: Union[Dict[str, Any], Literal[False]], key: Optional[str] = None\n) -> Optional[Any]:\n \"\"\"Constructs a transform or series of transforms based on parameters provided." 
@@ -13699,7 +13698,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "1192d3937df0cbf5939b0a3aa2066dfd79dee1d6ac78ae022e186dad517f9b3d" + "equalIndicator/v1": "d38d6aa0cf22bb05f6d5e75c782bce87299f32e79a7d0a010e602407fa26b253" }, "baselineState": "unchanged", "properties": { @@ -13725,9 +13724,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 294, + "startLine": 300, "startColumn": 17, - "charOffset": 10287, + "charOffset": 10682, "charLength": 3, "snippet": { "text": "cls" @@ -13735,9 +13734,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 292, + "startLine": 298, "startColumn": 1, - "charOffset": 10269, + "charOffset": 10664, "charLength": 182, "snippet": { "text": "\n\ndef tg_to_torch(cls, keys: Optional[Sequence[str]] = None):\n \"\"\"Ensures wrapped transform can handle both :class:`~torch.Tensor` and :mod:`torchgeo` style :class:`dict` inputs.\n" @@ -13753,7 +13752,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "0f9ff6dad4f0fb97e230da4fe996589ad6341c413306f38a88b758b9bfe420fe" + "equalIndicator/v1": "0872353e5623abbaab92979872d9f543acc8984bbe6970119996ad9ea396c66a" }, "baselineState": "unchanged", "properties": { @@ -13779,9 +13778,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1622, + "startLine": 1628, "startColumn": 5, - "charOffset": 55826, + "charOffset": 56222, "charLength": 22, "snippet": { "text": "_testing: bool = False" @@ -13789,9 +13788,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1620, + "startLine": 1626, "startColumn": 1, - "charOffset": 55757, + "charOffset": 56153, "charLength": 182, "snippet": { "text": " env_name: str = \"env\",\n host_num: Union[str, int] = 6006,\n _testing: bool = False,\n) -> Optional[int]:\n \"\"\"Runs the :mod:`TensorBoard` logs and hosts on a local webpage." 
@@ -13807,7 +13806,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "3511f73dca1355463da7e9cab753cd976d1a19526fdd80b3b890bfbe6a7b6e62" + "equalIndicator/v1": "4a2af2bc668e8b4be98fc0dda7295e78269096320a5740fbc91508c16b1dd703" }, "baselineState": "unchanged", "properties": { @@ -13822,33 +13821,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Access to a protected member _to_tuple of a class", - "markdown": "Access to a protected member _to_tuple of a class" + "text": "Access to a protected member _print_banner of a module", + "markdown": "Access to a protected member _print_banner of a module" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/samplers.py", + "uri": "scripts/MinervaExp.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 44, - "startColumn": 37, - "charOffset": 2030, - "charLength": 9, + "startLine": 84, + "startColumn": 5, + "charOffset": 3247, + "charLength": 19, "snippet": { - "text": "_to_tuple" + "text": "utils._print_banner" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 42, + "startLine": 82, "startColumn": 1, - "charOffset": 1888, - "charLength": 209, + "charOffset": 3214, + "charLength": 97, "snippet": { - "text": "from torchgeo.datasets.utils import BoundingBox\nfrom torchgeo.samplers import BatchGeoSampler, GeoSampler\nfrom torchgeo.samplers.utils import _to_tuple, get_random_bounding_box\n\nfrom minerva.utils import utils" + "text": "\n # Print Minerva banner.\n utils._print_banner()\n\n with runner.WandbConnectionManager():" } } }, @@ -13861,7 +13860,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "de54a8cc6073b39a290127bbaffb944302ee0ab7f290de14ccc9fd576482cb72" + "equalIndicator/v1": "5b3f689acab69ac91e0e73dd479ea7c734e7ce63da61fcc8cb88339a1cd15684" }, "baselineState": "unchanged", "properties": { @@ -13876,33 +13875,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Access to a protected member _api of a class", - "markdown": "Access to a protected member _api of a class" + "text": "Access to a protected member _to_tuple of a class", + "markdown": "Access to a protected member _to_tuple of a class" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/TorchWeightDownloader.py", + "uri": "minerva/samplers.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 41, - "startColumn": 6, - "charOffset": 1764, - "charLength": 23, + "startLine": 50, + "startColumn": 37, + "charOffset": 2425, + "charLength": 9, "snippet": { - "text": "torchvision.models._api" + "text": "_to_tuple" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 39, + "startLine": 48, "startColumn": 1, - "charOffset": 1730, - "charLength": 122, + "charOffset": 2283, + "charLength": 209, "snippet": { - "text": "from typing import Optional\n\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.models import get_torch_weights" + "text": "from torchgeo.datasets.utils import BoundingBox\nfrom torchgeo.samplers import BatchGeoSampler, GeoSampler\nfrom torchgeo.samplers.utils import _to_tuple, get_random_bounding_box\n\nfrom minerva.utils import utils" } } }, @@ -13915,7 +13914,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "8edc833231284b872e1de9249066787847d2bdecd4d4899ac1255f753019891b" + "equalIndicator/v1": "2bdb77c5be296b59b25cd653bf295b2c6a53abc2a8f3527e60a1a95a7c6aa9a1" }, "baselineState": "unchanged", "properties": { @@ -13930,33 +13929,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Access to a protected member 
_print_banner of a module", - "markdown": "Access to a protected member _print_banner of a module" + "text": "Access to a protected member _optional_import of a module", + "markdown": "Access to a protected member _optional_import of a module" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/MinervaExp.py", + "uri": "minerva/logger.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 84, - "startColumn": 5, - "charOffset": 3247, - "charLength": 19, + "startLine": 80, + "startColumn": 26, + "charOffset": 3116, + "charLength": 22, "snippet": { - "text": "utils._print_banner" + "text": "utils._optional_import" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 82, + "startLine": 78, "startColumn": 1, - "charOffset": 3214, - "charLength": 97, + "charOffset": 3037, + "charLength": 174, "snippet": { - "text": "\n # Print Minerva banner.\n utils._print_banner()\n\n with runner.WandbConnectionManager():" + "text": "TENSORBOARD_WRITER: Optional[Callable[..., Any]]\ntry:\n TENSORBOARD_WRITER = utils._optional_import(\n \"torch.utils.tensorboard.writer\",\n name=\"SummaryWriter\"," } } }, @@ -13969,9 +13968,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "5b3f689acab69ac91e0e73dd479ea7c734e7ce63da61fcc8cb88339a1cd15684" + "equalIndicator/v1": "e034a0862a3793122addab541f6c5d22bec03bd1e5a207b44d122dfdfc58ae47" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -13984,33 +13983,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Access to a protected member _optional_import of a module", - "markdown": "Access to a protected member _optional_import of a module" + "text": "Access to a protected member _api of a class", + "markdown": "Access to a protected member _api of a class" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/logger.py", + "uri": "minerva/models/core.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 74, - "startColumn": 26, - "charOffset": 2721, - "charLength": 22, + "startColumn": 6, + "charOffset": 2704, + "charLength": 23, "snippet": { - "text": "utils._optional_import" + "text": "torchvision.models._api" }, "sourceLanguage": "Python" }, "contextRegion": { "startLine": 72, "startColumn": 1, - "charOffset": 2642, - "charLength": 174, + "charOffset": 2597, + "charLength": 194, "snippet": { - "text": "TENSORBOARD_WRITER: Optional[Callable[..., Any]]\ntry:\n TENSORBOARD_WRITER = utils._optional_import(\n \"torch.utils.tensorboard.writer\",\n name=\"SummaryWriter\"," + "text": "from torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.optim import Optimizer\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.utils.utils import func_by_str" } } }, @@ -14023,9 +14022,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "283e9350e23e3a241f0e51ef7a3730fec822fd729598a34dabd86387bff34c7f" + "equalIndicator/v1": "85194b32a9df1ca3eaa47bce774adfc234bdb37e1560600ca0571df4f89c5fbc" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -14045,13 +14044,13 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/resnet.py", + "uri": "scripts/TorchWeightDownloader.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 49, + "startLine": 41, "startColumn": 6, - "charOffset": 1956, + "charOffset": 1764, "charLength": 23, "snippet": { "text": "torchvision.models._api" @@ -14059,12 +14058,12 @@ 
"sourceLanguage": "Python" }, "contextRegion": { - "startLine": 47, + "startLine": 39, "startColumn": 1, - "charOffset": 1890, - "charLength": 179, + "charOffset": 1730, + "charLength": 122, "snippet": { - "text": "from torch import Tensor\nfrom torch.nn.modules import Module\nfrom torchvision.models._api import WeightsEnum\nfrom torchvision.models.resnet import BasicBlock, Bottleneck, conv1x1\n" + "text": "from typing import Optional\n\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.models import get_torch_weights" } } }, @@ -14077,7 +14076,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "58f535c927ed38821a6bf64034af1e6ba0dd1c7f4bec40c249a1063639575a4d" + "equalIndicator/v1": "8edc833231284b872e1de9249066787847d2bdecd4d4899ac1255f753019891b" }, "baselineState": "unchanged", "properties": { @@ -14092,33 +14091,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Access to a protected member _api of a class", - "markdown": "Access to a protected member _api of a class" + "text": "Access to a protected member _device of a class", + "markdown": "Access to a protected member _device of a class" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/core.py", + "uri": "minerva/utils/utils.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 68, - "startColumn": 6, - "charOffset": 2309, - "charLength": 23, + "startLine": 167, + "startColumn": 25, + "charOffset": 5957, + "charLength": 7, "snippet": { - "text": "torchvision.models._api" + "text": "_device" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 66, + "startLine": 165, "startColumn": 1, - "charOffset": 2202, - "charLength": 194, + "charOffset": 5860, + "charLength": 202, "snippet": { - "text": "from torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.optim import Optimizer\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.utils.utils import func_by_str" + "text": "from torch.nn import functional as F\nfrom torch.nn.modules import Module\nfrom torch.types import _device\nfrom torchgeo.datasets.utils import BoundingBox\nfrom urllib3.exceptions import NewConnectionError" } } }, @@ -14131,7 +14130,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "3afcbac0f548d4ed948601d249aa900427de87a460bec8334e3af59ed0198c66" + "equalIndicator/v1": "86a6bddc00367e8a3f9a4d9157e4f4fdfb39a542d88d2347f960998f29a46ff9" }, "baselineState": "unchanged", "properties": { @@ -14146,33 +14145,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Access to a protected member _optional_import of a module", - "markdown": "Access to a protected member _optional_import of a module" + "text": "Access to a protected member _api of a class", + "markdown": "Access to a protected member _api of a class" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/trainer.py", + "uri": "minerva/models/resnet.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 92, - "startColumn": 26, - "charOffset": 3500, - "charLength": 22, + "startLine": 55, + "startColumn": 6, + "charOffset": 2351, + "charLength": 23, "snippet": { - "text": "utils._optional_import" + "text": "torchvision.models._api" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 90, + "startLine": 53, "startColumn": 1, - "charOffset": 3421, - "charLength": 174, + "charOffset": 2285, + "charLength": 179, "snippet": { - "text": "TENSORBOARD_WRITER: Optional[Callable[..., Any]]\ntry:\n TENSORBOARD_WRITER = utils._optional_import(\n 
\"torch.utils.tensorboard.writer\",\n name=\"SummaryWriter\"," + "text": "from torch import Tensor\nfrom torch.nn.modules import Module\nfrom torchvision.models._api import WeightsEnum\nfrom torchvision.models.resnet import BasicBlock, Bottleneck, conv1x1\n" } } }, @@ -14185,9 +14184,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "a517916a9cc5f214e7c89966489e74eb5c816f973a7f5afcaaeb8b1c8bbb8790" + "equalIndicator/v1": "b123567c68765eddbd89d1a4e9d0da9c700cd5b3689881e72f60a06513c65949" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -14211,9 +14210,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 507, - "startColumn": 19, - "charOffset": 24864, + "startLine": 98, + "startColumn": 26, + "charOffset": 3895, "charLength": 22, "snippet": { "text": "utils._optional_import" @@ -14221,12 +14220,12 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 505, + "startLine": 96, "startColumn": 1, - "charOffset": 24780, - "charLength": 180, + "charOffset": 3816, + "charLength": 174, "snippet": { - "text": " MinervaModel: Loaded model ready for use.\n \"\"\"\n convert = utils._optional_import(\n \"onnx2torch\", name=\"convert\", package=\"onnx2torch\"\n )" + "text": "TENSORBOARD_WRITER: Optional[Callable[..., Any]]\ntry:\n TENSORBOARD_WRITER = utils._optional_import(\n \"torch.utils.tensorboard.writer\",\n name=\"SummaryWriter\"," } } }, @@ -14239,9 +14238,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "d6812261c647b3320dd6a5e3034cd1a7636cfea5432f536cd2397e3fcd5e8de4" + "equalIndicator/v1": "8753bd526bbca4e67b485032f039e3a73042bea7adf3b36bbd36de79053cd85f" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WEAK WARNING", "tags": [ @@ -14254,33 +14253,33 @@ "kind": "fail", "level": "note", "message": { - "text": "Access to a protected member _device of a class", - "markdown": "Access to a protected member _device of a class" + "text": "Access to a protected member _optional_import of a module", + "markdown": "Access to a protected member _optional_import of a module" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/utils/utils.py", + "uri": "minerva/trainer.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 161, - "startColumn": 25, - "charOffset": 5562, - "charLength": 7, + "startLine": 513, + "startColumn": 21, + "charOffset": 25261, + "charLength": 22, "snippet": { - "text": "_device" + "text": "utils._optional_import" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 159, + "startLine": 511, "startColumn": 1, - "charOffset": 5465, - "charLength": 202, + "charOffset": 25175, + "charLength": 154, "snippet": { - "text": "from torch.nn import functional as F\nfrom torch.nn.modules import Module\nfrom torch.types import _device\nfrom torchgeo.datasets.utils import BoundingBox\nfrom urllib3.exceptions import NewConnectionError" + "text": " MinervaModel: Loaded model ready for use.\n \"\"\"\n onnx_load = utils._optional_import(\n \"onnx\",\n name=\"load\"," } } }, @@ -14293,7 +14292,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "531f072f1e58e66c2e976876813b34b42834617122665c6b2d26358d8abc6560" + "equalIndicator/v1": "727fdab426e0dad833b4daa30ef77e4e8975139f8bfc71fa5d9ddef75024989b" }, "baselineState": "unchanged", "properties": { @@ -14304,37 +14303,37 @@ } }, { - "ruleId": "PyCallingNonCallableInspection", + "ruleId": "PyProtectedMemberInspection", "kind": "fail", - "level": 
"warning", + "level": "note", "message": { - "text": "'MinervaModel' object is not callable", - "markdown": "'MinervaModel' object is not callable" + "text": "Access to a protected member _optional_import of a module", + "markdown": "Access to a protected member _optional_import of a module" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/siamese.py", + "uri": "minerva/trainer.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 196, - "startColumn": 35, - "charOffset": 7912, - "charLength": 16, + "startLine": 518, + "startColumn": 19, + "charOffset": 25386, + "charLength": 22, "snippet": { - "text": "self.backbone(x)" + "text": "utils._optional_import" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 194, + "startLine": 516, "startColumn": 1, - "charOffset": 7759, - "charLength": 225, + "charOffset": 25330, + "charLength": 133, "snippet": { - "text": " :attr:`~SimCLR.proj_head` and the detached embedding vector from the :attr:`~SimCLR.backbone`.\n \"\"\"\n f: Tensor = torch.flatten(self.backbone(x)[0], start_dim=1)\n g: Tensor = self.proj_head(f)\n" + "text": " package=\"onnx\",\n )\n convert = utils._optional_import(\n \"onnx2torch\",\n name=\"convert\"," } } }, @@ -14347,11 +14346,11 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "440d0d6164eb55add5097c708f17f32430b5524f4d53902cf26da9d5ff24b67b" + "equalIndicator/v1": "890cac7256809f799dc4deb59ccc52bd695801e7c5b5ec894dc7618dcb19e6b7" }, - "baselineState": "unchanged", + "baselineState": "new", "properties": { - "ideaSeverity": "WARNING", + "ideaSeverity": "WEAK WARNING", "tags": [ "Python" ] @@ -14373,9 +14372,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 335, - "startColumn": 50, - "charOffset": 13332, + "startLine": 203, + "startColumn": 35, + "charOffset": 8340, "charLength": 16, "snippet": { "text": "self.backbone(x)" @@ -14383,12 +14382,12 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 333, + "startLine": 201, "startColumn": 1, - "charOffset": 13189, - "charLength": 246, + "charOffset": 8187, + "charLength": 225, "snippet": { - "text": " and the detached embedding vector from the :attr:`~SimSiam.backbone`.\n \"\"\"\n z: Tensor = self.proj_head(torch.flatten(self.backbone(x)[0], start_dim=1)) # type: ignore[attr-defined]\n\n p: Tensor = self.predictor(z)" + "text": " :attr:`~SimCLR.proj_head` and the detached embedding vector from the :attr:`~SimCLR.backbone`.\n \"\"\"\n f: Tensor = torch.flatten(self.backbone(x)[0], start_dim=1)\n g: Tensor = self.proj_head(f)\n" } } }, @@ -14401,7 +14400,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "3431a138921d1618c632f38d11ba90860909f432930ee7af7a79fd3088b30c94" + "equalIndicator/v1": "c18e36042736ca5fc9db0758708a66daf7f209fd50e0d413f00e0e3d3e06c695" }, "baselineState": "unchanged", "properties": { @@ -14423,13 +14422,13 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/fcn.py", + "uri": "minerva/models/siamese.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 145, - "startColumn": 13, - "charOffset": 6373, + "startLine": 342, + "startColumn": 50, + "charOffset": 13760, "charLength": 16, "snippet": { "text": "self.backbone(x)" @@ -14437,12 +14436,12 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 143, + "startLine": 340, "startColumn": 1, - "charOffset": 6293, - "charLength": 125, + "charOffset": 13617, + "charLength": 246, "snippet": { - "text": " each pixel input ``x`` being of that class.\n \"\"\"\n z = self.backbone(x)\n z = 
self.decoder(z)\n" + "text": " and the detached embedding vector from the :attr:`~SimSiam.backbone`.\n \"\"\"\n z: Tensor = self.proj_head(torch.flatten(self.backbone(x)[0], start_dim=1)) # type: ignore[attr-defined]\n\n p: Tensor = self.predictor(z)" } } }, @@ -14455,7 +14454,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "6a46479e688eb0d6701090c4051fc439911f19e809fe9c4b9df2b0b8e35f7da1" + "equalIndicator/v1": "befd674c3a7b6b78b952a337ae397f0542dff105bc7cdee67c086e2379a22666" }, "baselineState": "unchanged", "properties": { @@ -14470,8 +14469,8 @@ "kind": "fail", "level": "warning", "message": { - "text": "'DCN' object is not callable", - "markdown": "'DCN' object is not callable" + "text": "'MinervaModel' object is not callable", + "markdown": "'MinervaModel' object is not callable" }, "locations": [ { @@ -14481,22 +14480,22 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 146, + "startLine": 151, "startColumn": 13, - "charOffset": 6402, - "charLength": 15, + "charOffset": 6768, + "charLength": 16, "snippet": { - "text": "self.decoder(z)" + "text": "self.backbone(x)" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 144, + "startLine": 149, "startColumn": 1, - "charOffset": 6349, - "charLength": 106, + "charOffset": 6688, + "charLength": 125, "snippet": { - "text": " \"\"\"\n z = self.backbone(x)\n z = self.decoder(z)\n\n assert isinstance(z, Tensor)" + "text": " each pixel input ``x`` being of that class.\n \"\"\"\n z = self.backbone(x)\n z = self.decoder(z)\n" } } }, @@ -14509,7 +14508,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "2927ea3e42e3065d12d1d97e641c676655c6e15da8346da90e871bf498ca8cf9" + "equalIndicator/v1": "9b4b69d484dd7fc7d751afda6364cd0a1831337b17d8821c8617837a0800cefa" }, "baselineState": "unchanged", "properties": { @@ -14524,33 +14523,33 @@ "kind": "fail", "level": "warning", "message": { - "text": "'ResNet' object is not callable", - "markdown": "'ResNet' object is not callable" + "text": "'DCN' object is not callable", + "markdown": "'DCN' object is not callable" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/resnet.py", + "uri": "minerva/models/fcn.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 442, - "startColumn": 75, - "charOffset": 20243, - "charLength": 37, + "startLine": 152, + "startColumn": 13, + "charOffset": 6797, + "charLength": 15, "snippet": { - "text": "self.network(\n x\n )" + "text": "self.decoder(z)" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 440, + "startLine": 150, "startColumn": 1, - "charOffset": 20044, - "charLength": 291, + "charOffset": 6744, + "charLength": 106, "snippet": { - "text": " :class:`~torch.Tensor` of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n z: Union[Tensor, Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]] = self.network(\n x\n )\n if isinstance(z, Tensor):\n return z" + "text": " \"\"\"\n z = self.backbone(x)\n z = self.decoder(z)\n\n assert isinstance(z, Tensor)" } } }, @@ -14563,7 +14562,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "50a18d2bc3c825f8ff63de94525e7fbeacde96fde832628826c547164d6fbca6" + "equalIndicator/v1": "72bcf71597f326c690e55026c368597c94e3d65c42167f79c75079b3f51574c1" }, "baselineState": "unchanged", "properties": { @@ -14589,9 +14588,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 196, + "startLine": 202, "startColumn": 13, - "charOffset": 7019, + "charOffset": 7414, "charLength": 12, "snippet": { "text": "self.conv(x)" 
@@ -14599,9 +14598,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 194, + "startLine": 200, "startColumn": 1, - "charOffset": 6937, + "charOffset": 7332, "charLength": 132, "snippet": { "text": " x = torch.cat([x2, x1], dim=1) # type: ignore[attr-defined]\n\n x = self.conv(x)\n\n assert isinstance(x, Tensor)" @@ -14617,7 +14616,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "7df571e22d54396e7f5b699c9f3a091c5c7ca6de8c8c2cfca5b3d6d06c2beec9" + "equalIndicator/v1": "59cdccaaf5455e272b1da40fee0e3eefe7a6936d62b020d15e63a257abf15358" }, "baselineState": "unchanged", "properties": { @@ -14643,9 +14642,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 297, + "startLine": 303, "startColumn": 14, - "charOffset": 10619, + "charOffset": 11014, "charLength": 11, "snippet": { "text": "self.inc(x)" @@ -14653,9 +14652,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 295, + "startLine": 301, "startColumn": 1, - "charOffset": 10593, + "charOffset": 10988, "charLength": 93, "snippet": { "text": " \"\"\"\n\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)" @@ -14671,7 +14670,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "40bec14293b9526ea5eba18c7ad3b41a5202a8cdd1457491241bfc27c04110c8" + "equalIndicator/v1": "e1d1e7ed1a3fa375042a91dcf7fdf9552f48d9dc1488b4a56c8741768164232e" }, "baselineState": "unchanged", "properties": { @@ -14697,9 +14696,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 298, + "startLine": 304, "startColumn": 14, - "charOffset": 10644, + "charOffset": 11039, "charLength": 14, "snippet": { "text": "self.down1(x1)" @@ -14707,9 +14706,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 296, + "startLine": 302, "startColumn": 1, - "charOffset": 10605, + "charOffset": 11000, "charLength": 109, "snippet": { "text": "\n x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)" @@ -14725,7 +14724,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "834e8fbb9c80c75b13ac8f184680f4fc8a742521d5309407c7b664f1ba17b5fe" + "equalIndicator/v1": "81a0142ac17b20768ccba62d295b0b99cd0c0eb9b7fc7877dda1f4b32f6084ff" }, "baselineState": "unchanged", "properties": { @@ -14751,9 +14750,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 299, + "startLine": 305, "startColumn": 14, - "charOffset": 10672, + "charOffset": 11067, "charLength": 14, "snippet": { "text": "self.down2(x2)" @@ -14761,9 +14760,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 297, + "startLine": 303, "startColumn": 1, - "charOffset": 10606, + "charOffset": 11001, "charLength": 136, "snippet": { "text": " x1 = self.inc(x)\n x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)" @@ -14779,7 +14778,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "932f73fd23e37a1ab5b2fdab6775999c6172c779451bdb8581bae295acdbe30c" + "equalIndicator/v1": "5f8c8654679bf9533faa6f59f7e41278769c8da5791abee78c31219533972127" }, "baselineState": "unchanged", "properties": { @@ -14805,9 +14804,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 300, + "startLine": 306, "startColumn": 14, - "charOffset": 10700, + "charOffset": 11095, "charLength": 14, "snippet": { "text": "self.down3(x3)" @@ -14815,9 +14814,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 298, + "startLine": 304, "startColumn": 1, - "charOffset": 10631, + "charOffset": 11026, "charLength": 112, "snippet": { "text": " x2 = self.down1(x1)\n x3 = self.down2(x2)\n x4 = 
self.down3(x3)\n x5 = self.down4(x4)\n" @@ -14833,7 +14832,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "2a9c0449753c44e6b6bb7d3530d01a1ccca05d970c0a037dffdaefc354e6a282" + "equalIndicator/v1": "a3ec6f75eb20ab8dda329cb0b2a491ac4244c780139c94ce0e227730d8964d9b" }, "baselineState": "unchanged", "properties": { @@ -14859,9 +14858,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 301, + "startLine": 307, "startColumn": 14, - "charOffset": 10728, + "charOffset": 11123, "charLength": 14, "snippet": { "text": "self.down4(x4)" @@ -14869,9 +14868,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 299, + "startLine": 305, "startColumn": 1, - "charOffset": 10659, + "charOffset": 11054, "charLength": 113, "snippet": { "text": " x3 = self.down2(x2)\n x4 = self.down3(x3)\n x5 = self.down4(x4)\n\n x = self.up1(x5, x4)" @@ -14887,7 +14886,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "7c0a4134e3baa4d337dac7816fa520bbe900d6738f8b4d6c75073a673a3cbb29" + "equalIndicator/v1": "b31414d06e4c1de2a90d881616aa8edbe24a3155665ae7f988747f1ffe59033a" }, "baselineState": "unchanged", "properties": { @@ -14913,9 +14912,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 303, + "startLine": 309, "startColumn": 13, - "charOffset": 10756, + "charOffset": 11151, "charLength": 16, "snippet": { "text": "self.up1(x5, x4)" @@ -14923,9 +14922,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 301, + "startLine": 307, "startColumn": 1, - "charOffset": 10715, + "charOffset": 11110, "charLength": 113, "snippet": { "text": " x5 = self.down4(x4)\n\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)" @@ -14941,7 +14940,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "cc3e5fb482379512a957170b804e6ac254caa904a5454d4604ea99797a5dad18" + "equalIndicator/v1": "39e5bf1c8ce3c1934ddf82c1d2e475f1ce243034d0d670560e6f99dd4c153064" }, "baselineState": "unchanged", "properties": { @@ -14967,9 +14966,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 304, + "startLine": 310, "startColumn": 13, - "charOffset": 10785, + "charOffset": 11180, "charLength": 15, "snippet": { "text": "self.up2(x, x3)" @@ -14977,9 +14976,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 302, + "startLine": 308, "startColumn": 1, - "charOffset": 10743, + "charOffset": 11138, "charLength": 113, "snippet": { "text": "\n x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)" @@ -14995,7 +14994,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "a98dbd684a8aea9b10354e90ec179f806cb496a8246f0b31957b9ebf748d16dd" + "equalIndicator/v1": "fec3b3937ead639dc95c5e2db24ea09c6a981b959797b7a8c14536dfccc2f7b4" }, "baselineState": "unchanged", "properties": { @@ -15021,9 +15020,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 305, + "startLine": 311, "startColumn": 13, - "charOffset": 10813, + "charOffset": 11208, "charLength": 15, "snippet": { "text": "self.up3(x, x2)" @@ -15031,9 +15030,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 303, + "startLine": 309, "startColumn": 1, - "charOffset": 10744, + "charOffset": 11139, "charLength": 113, "snippet": { "text": " x = self.up1(x5, x4)\n x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n" @@ -15049,7 +15048,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "a2574a7afb976293ee1e92e36cb0d35ab321fc71df0a92599ff2b9b7dcbc9522" + "equalIndicator/v1": "07897d4e9edd80743b264e2d7744cbaf6d88ee7dc16b490c0cf36fceecd786ad" }, 
"baselineState": "unchanged", "properties": { @@ -15075,9 +15074,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 306, + "startLine": 312, "startColumn": 13, - "charOffset": 10841, + "charOffset": 11236, "charLength": 15, "snippet": { "text": "self.up4(x, x1)" @@ -15085,9 +15084,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 304, + "startLine": 310, "startColumn": 1, - "charOffset": 10773, + "charOffset": 11168, "charLength": 122, "snippet": { "text": " x = self.up2(x, x3)\n x = self.up3(x, x2)\n x = self.up4(x, x1)\n\n logits: Tensor = self.outc(x)" @@ -15103,7 +15102,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "7183af66a0e5745209ac17dd2e1f1d71f916862830722e1d562faeac25ad7156" + "equalIndicator/v1": "9d5fc80a719f087ef61dc29890fcdfc31f4dfb7eb6408be97ef073e4b0fe4994" }, "baselineState": "unchanged", "properties": { @@ -15129,9 +15128,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 308, + "startLine": 314, "startColumn": 26, - "charOffset": 10883, + "charOffset": 11278, "charLength": 12, "snippet": { "text": "self.outc(x)" @@ -15139,9 +15138,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 306, + "startLine": 312, "startColumn": 1, - "charOffset": 10829, + "charOffset": 11224, "charLength": 109, "snippet": { "text": " x = self.up4(x, x1)\n\n logits: Tensor = self.outc(x)\n\n assert isinstance(logits, Tensor)" @@ -15157,7 +15156,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "7944de39214427459d02695b4d403088b29da5ae0f440246683b2942c209f168" + "equalIndicator/v1": "0db6e8f1dd90b6c9ce7bb351050b72bf642a91ec7e35899dff0f33494e26e5d3" }, "baselineState": "unchanged", "properties": { @@ -15183,9 +15182,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 418, + "startLine": 424, "startColumn": 30, - "charOffset": 15966, + "charOffset": 16361, "charLength": 16, "snippet": { "text": "self.backbone(x)" @@ -15193,9 +15192,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 416, + "startLine": 422, "startColumn": 1, - "charOffset": 15860, + "charOffset": 16255, "charLength": 182, "snippet": { "text": " \"\"\"\n # Output tensors from the residual blocks of the resnet.\n x4, x3, x2, x1, x0 = self.backbone(x)\n\n # Concats and upsamples the outputs of the resnet." 
@@ -15211,7 +15210,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "dadf58470c26da62bf63a819402868f18ffc7cb6e66135feaa11b60977d04a50" + "equalIndicator/v1": "d8dcf79ba878e42b8c49dbf95734bc5edfa877adb1a1bfc0f2552e2ad44cadbe" }, "baselineState": "unchanged", "properties": { @@ -15237,9 +15236,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 421, + "startLine": 427, "startColumn": 13, - "charOffset": 16055, + "charOffset": 16450, "charLength": 16, "snippet": { "text": "self.up1(x4, x3)" @@ -15247,9 +15246,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 419, + "startLine": 425, "startColumn": 1, - "charOffset": 15983, + "charOffset": 16378, "charLength": 144, "snippet": { "text": "\n # Concats and upsamples the outputs of the resnet.\n x = self.up1(x4, x3)\n x = self.up2(x, x2)\n x = self.up3(x, x1)" @@ -15265,7 +15264,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "d290eb7f6f9c95f0b09aaf68df57f1ebf3019c2e16a7fa6ab37c05f4087f2092" + "equalIndicator/v1": "e478bb4407f0cea0bbd51d6253393a3b03323afd7a7ab0453684436f3a12d846" }, "baselineState": "unchanged", "properties": { @@ -15291,9 +15290,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 422, + "startLine": 428, "startColumn": 13, - "charOffset": 16084, + "charOffset": 16479, "charLength": 15, "snippet": { "text": "self.up2(x, x2)" @@ -15301,9 +15300,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 420, + "startLine": 426, "startColumn": 1, - "charOffset": 15984, + "charOffset": 16379, "charLength": 144, "snippet": { "text": " # Concats and upsamples the outputs of the resnet.\n x = self.up1(x4, x3)\n x = self.up2(x, x2)\n x = self.up3(x, x1)\n" @@ -15319,7 +15318,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "9bd6bce0e6e2e274013c76236c06913f86fcdd79b5309c412dfaf0ff81ee37af" + "equalIndicator/v1": "8aa334e6b4809e612776ba376d796aebf187542483aae7a97d637202271ac79d" }, "baselineState": "unchanged", "properties": { @@ -15345,9 +15344,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 423, + "startLine": 429, "startColumn": 13, - "charOffset": 16112, + "charOffset": 16507, "charLength": 15, "snippet": { "text": "self.up3(x, x1)" @@ -15355,9 +15354,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 421, + "startLine": 427, "startColumn": 1, - "charOffset": 16043, + "charOffset": 16438, "charLength": 193, "snippet": { "text": " x = self.up1(x4, x3)\n x = self.up2(x, x2)\n x = self.up3(x, x1)\n\n # Add the upsampled and deconv tensor to the output of the input convolutional layer of the resnet." 
@@ -15373,7 +15372,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "f2bba61ea60f253561e68c790608a338f537858317def388d9354ae0cccca440" + "equalIndicator/v1": "1043a3a2a387aed59e675fbc226013b7f0b757723f7cb196c0cded0d1d723328" }, "baselineState": "unchanged", "properties": { @@ -15399,9 +15398,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 434, + "startLine": 440, "startColumn": 26, - "charOffset": 16523, + "charOffset": 16918, "charLength": 12, "snippet": { "text": "self.outc(x)" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 432, + "startLine": 438, "startColumn": 1, - "charOffset": 16412, + "charOffset": 16807, "charLength": 166, "snippet": { "text": "\n # Reduces the latent channels to the number of classes for the output tensor.\n logits: Tensor = self.outc(x)\n\n assert isinstance(logits, Tensor)" } } }, "logicalLocations": [ { "fullyQualifiedName": "project", "kind": "module" } ] } ], "partialFingerprints": { - "equalIndicator/v1": "18a6c79402a3856f2d70122d3508d254e92b00c3de70ad229cdafef4edbfc729" + "equalIndicator/v1": "89c5b79031f7b3afd3a4b43f522da51a84c82903fe53cd34ec06ee9121489e32" }, "baselineState": "unchanged", "properties": { @@ -15442,33 +15441,33 @@ "kind": "fail", "level": "warning", "message": { - "text": "'MinervaSiamese' object is not callable", - "markdown": "'MinervaSiamese' object is not callable" + "text": "'ResNet' object is not callable", + "markdown": "'ResNet' object is not callable" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/trainer.py", + "uri": "minerva/models/resnet.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1035, - "startColumn": 34, - "charOffset": 47271, - "charLength": 20, + "startLine": 448, + "startColumn": 75, + "charOffset": 20638, + "charLength": 37, "snippet": { - "text": "self.model(val_data)" + "text": "self.network(\n x\n )" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1033, + "startLine": 446, "startColumn": 1, - "charOffset": 47147, - "charLength": 190, + "charOffset": 20439, + "charLength": 291, "snippet": { - "text": " feature, _ = self.model.forward_single(val_data)\n else:\n feature, _ = self.model(val_data)\n\n feature_list.append(feature)" + "text": " :class:`~torch.Tensor` of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n z: Union[Tensor, Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]] = self.network(\n x\n )\n if isinstance(z, Tensor):\n return z" } } }, "logicalLocations": [ { "fullyQualifiedName": "project", "kind": "module" } ] } ], "partialFingerprints": { - "equalIndicator/v1": "66e7e0f72909cb25c5503323177c8c0ddf3170ab8c2e04a1f557fcf4160141ed" + "equalIndicator/v1": "c9f825c305bb8d6b64645239c6990d54227bd05bb6c92215036186e36a1f97e8" }, "baselineState": "unchanged", "properties": { @@ -15507,22 +15506,22 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1063, + "startLine": 1049, "startColumn": 34, - "charOffset": 48513, - "charLength": 21, + "charOffset": 47830, + "charLength": 20, "snippet": { - "text": "self.model(test_data)" + "text": "self.model(val_data)" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1061, + "startLine": 1047, "startColumn": 1, - "charOffset": 48388, - "charLength": 187, + "charOffset": 47706, + "charLength": 190, "snippet": { - "text": " feature, _ = self.model.forward_single(test_data)\n else:\n feature, _ = self.model(test_data)\n\n total_num += batch_size" + "text": " feature, _ = self.model.forward_single(val_data)\n else:\n feature, _ = self.model(val_data)\n\n feature_list.append(feature)" } } }, "logicalLocations": [ { "fullyQualifiedName": "project", "kind": "module" } ]
} ], "partialFingerprints": { - "equalIndicator/v1": "f2fda796ed7e140db05e1f2b5e4fcdc9970f4c462b0d12c0218d4356a0961b53" + "equalIndicator/v1": "d83f9f725c84560c575f14c978170fa2d80a2684e9af041d829a840ed94afc16" }, "baselineState": "unchanged", "properties": { @@ -15546,37 +15545,37 @@ } }, { - "ruleId": "PyRedeclarationInspection", + "ruleId": "PyCallingNonCallableInspection", "kind": "fail", "level": "warning", "message": { - "text": "Redeclared '__call__' defined above without usage", - "markdown": "Redeclared '__call__' defined above without usage" + "text": "'MinervaSiamese' object is not callable", + "markdown": "'MinervaSiamese' object is not callable" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/transforms.py", + "uri": "minerva/trainer.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 336, - "startColumn": 9, - "charOffset": 11547, - "charLength": 8, + "startLine": 1077, + "startColumn": 34, + "charOffset": 49072, + "charLength": 21, "snippet": { - "text": "__call__" + "text": "self.model(test_data)" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 334, + "startLine": 1075, "startColumn": 1, - "charOffset": 11506, - "charLength": 142, + "charOffset": 48947, + "charLength": 187, "snippet": { - "text": " ... # pragma: no cover\n\n def __call__(\n self, sample: Union[Tensor, Dict[str, Any]]\n ) -> Union[Tensor, Dict[str, Any]]:" + "text": " feature, _ = self.model.forward_single(test_data)\n else:\n feature, _ = self.model(test_data)\n\n total_num += batch_size" } } }, @@ -15589,7 +15588,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "f6b3b93fed5c0889674b0d51c15959b481988cc4597239f72e778e7e923bd719" + "equalIndicator/v1": "d85ce7c2ef3c511d41f1b2697a6ef17a501e8b9d6339fbbc38f7a495ca15041b" }, "baselineState": "unchanged", "properties": { @@ -15615,9 +15614,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 153, + "startLine": 159, "startColumn": 9, - "charOffset": 5897, + "charOffset": 6292, "charLength": 4, "snippet": { "text": "step" @@ -15625,9 +15624,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 151, + "startLine": 157, "startColumn": 1, - "charOffset": 5856, + "charOffset": 6251, "charLength": 79, "snippet": { "text": " ... # pragma: no cover\n\n def step(\n self,\n x: Tensor," @@ -15643,7 +15642,61 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "02c89ea6d5f7ca7e662560a4f01054f644aa0fb4975f2a339d40302e27100846" + "equalIndicator/v1": "19314d5612acf4d0305b7c37e2e3a8bd113753782152d511640264ac6793ea4c" + }, + "baselineState": "unchanged", + "properties": { + "ideaSeverity": "WARNING", + "tags": [ + "Python" + ] + } + }, + { + "ruleId": "PyRedeclarationInspection", + "kind": "fail", + "level": "warning", + "message": { + "text": "Redeclared '__call__' defined above without usage", + "markdown": "Redeclared '__call__' defined above without usage" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "minerva/transforms.py", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 342, + "startColumn": 9, + "charOffset": 11942, + "charLength": 8, + "snippet": { + "text": "__call__" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 340, + "startColumn": 1, + "charOffset": 11901, + "charLength": 142, + "snippet": { + "text": " ... 
# pragma: no cover\n\n def __call__(\n self, sample: Union[Tensor, Dict[str, Any]]\n ) -> Union[Tensor, Dict[str, Any]]:" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + ], + "partialFingerprints": { + "equalIndicator/v1": "56019c7f80598bb9543101e7030f0adc2fcbc2feec91a1bb03798ff6ed782b52" }, "baselineState": "unchanged", "properties": { @@ -15669,9 +15722,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 328, + "startLine": 334, "startColumn": 13, - "charOffset": 11529, + "charOffset": 11924, "charLength": 8, "snippet": { "text": "__call__" @@ -15679,9 +15732,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 326, + "startLine": 332, "startColumn": 1, - "charOffset": 11480, + "charOffset": 11875, "charLength": 202, "snippet": { "text": " ... # pragma: no cover\n\n def __call__(self, batch: Union[Dict[str, Any], Tensor]) -> Dict[str, Any]:\n if isinstance(batch, Tensor):\n return self.wrap(batch)" @@ -15697,7 +15750,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "7ade1ea496a5819d9a54c5f1aa0f4c1368c150fd135cafd1e28e0e6f08f7b320" + "equalIndicator/v1": "7f71d8167c9cd747693273e4bc737428018b15e95d8ccb547f1b601c547a671f" }, "baselineState": "unchanged", "properties": { @@ -15723,9 +15776,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 462, + "startLine": 468, "startColumn": 5, - "charOffset": 15174, + "charOffset": 15569, "charLength": 16, "snippet": { "text": "_optional_import" @@ -15733,9 +15786,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 460, + "startLine": 466, "startColumn": 1, - "charOffset": 15168, + "charOffset": 15563, "charLength": 145, "snippet": { "text": "\n\ndef _optional_import(\n module: str, *, name: Optional[str] = None, package: Optional[str] = None\n) -> Union[ModuleType, Callable[..., Any]]:" @@ -15751,9 +15804,9 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "886709978bf88838722cb973bd0584da4517854c770257ef7ebeac54bccb8bb4" + "equalIndicator/v1": "27c3e0ea889df7346c4e31d07e6a95b08b9f5bb0e37935de4321d139cb4e1704" }, - "baselineState": "new", + "baselineState": "unchanged", "properties": { "ideaSeverity": "WARNING", "tags": [ @@ -15777,9 +15830,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 682, + "startLine": 688, "startColumn": 5, - "charOffset": 21459, + "charOffset": 21855, "charLength": 21, "snippet": { "text": "transform_coordinates" @@ -15787,9 +15840,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 680, + "startLine": 686, "startColumn": 1, - "charOffset": 21453, + "charOffset": 21849, "charLength": 104, "snippet": { "text": "\n\ndef transform_coordinates(\n x: Union[Sequence[float], float],\n y: Union[Sequence[float], float]," @@ -15805,7 +15858,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "f66824ff26dc67bd35267f79fae12eb0500385b0af4df2972c580829ca3e1bf5" + "equalIndicator/v1": "d43a2c5d902aa223dd4171a05892a075395312a20deec734769f731108b96a41" }, "baselineState": "unchanged", "properties": { @@ -15831,9 +15884,9 @@ "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1117, + "startLine": 1123, "startColumn": 5, - "charOffset": 37288, + "charOffset": 37684, "charLength": 14, "snippet": { "text": "mask_transform" @@ -15841,9 +15894,9 @@ "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1115, + "startLine": 1121, "startColumn": 1, - "charOffset": 37282, + "charOffset": 37678, "charLength": 98, "snippet": { "text": "\n\ndef mask_transform(\n array: Union[NDArray[Any, Int], 
LongTensor],\n matrix: Dict[int, int]," @@ -15859,7 +15912,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "57d7f428152c2a979b359d0b53a1f06fced7cdc575b87c7788e32d3bb3c9efff" + "equalIndicator/v1": "26a6ce3aba2e8299fa9c15a5d52f775806a754563ed4d3a6c3b860970df5a70c" }, "baselineState": "unchanged", "properties": { @@ -15871,8 +15924,8 @@ } ], "automationDetails": { - "id": "project/qodana/2023-04-24", - "guid": "64b989ed-cbeb-4a30-94b9-0c642d52f570", + "id": "project/qodana/2023-05-11", + "guid": "a8b0e039-587e-4b3e-ba79-43da994091f1", "properties": { "jobUrl": "" } @@ -15895,16 +15948,316 @@ { "physicalLocation": { "artifactLocation": { - "uri": "setup.py", + "uri": "notebooks/Torchgeo_FCN_Small.ipynb", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 6620, + "snippet": { + "text": "{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"import os\\n\",\n \"import tempfile\\n\",\n \"from pathlib import Path\\n\",\n \"\\n\",\n \"from torch.utils.data import DataLoader\\n\",\n \"from torchvision.models.segmentation import fcn_resnet50\\n\",\n \"import torch.nn as nn\\n\",\n \"from torchgeo.datasets import NAIP, ChesapeakeDE, stack_samples\\n\",\n \"from torchgeo.datasets.utils import download_url\\n\",\n \"from torchgeo.samplers import RandomGeoSampler\\n\",\n \"from torch.nn import CrossEntropyLoss\\n\",\n \"from torch.optim import Adam\\n\",\n \"import torch\\n\",\n \"import numpy as np\\n\",\n \"import matplotlib.pyplot as plt\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from minerva.models import FCN8ResNet18\\n\",\n \"from minerva.utils.utils import get_cuda_device\\n\",\n \"\\n\",\n \"device = get_cuda_device(0)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"data_root = tempfile.gettempdir()\\n\",\n \"train_root = Path(data_root, \\\"naip\\\", \\\"train\\\")\\n\",\n \"test_root = Path(data_root, \\\"naip\\\", \\\"test\\\")\\n\",\n \"naip_url = \\\"https://naipeuwest.blob.core.windows.net/naip/v002/de/2018/de_060cm_2018/38075/\\\"\\n\",\n \"tiles = [\\n\",\n \" \\\"m_3807511_ne_18_060_20181104.tif\\\",\\n\",\n \" \\\"m_3807511_se_18_060_20181104.tif\\\",\\n\",\n \" \\\"m_3807512_nw_18_060_20180815.tif\\\",\\n\",\n \"]\\n\",\n \"\\n\",\n \"for tile in tiles:\\n\",\n \" download_url(naip_url + tile, train_root)\\n\",\n \"\\n\",\n \"download_url(naip_url + \\\"m_3807512_sw_18_060_20180815.tif\\\", test_root)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"train_naip = NAIP(train_root)\\n\",\n \"test_naip = NAIP(test_root)\\n\",\n \"\\n\",\n \"chesapeake_root = os.path.join(data_root, \\\"chesapeake\\\")\\n\",\n \"\\n\",\n \"chesapeake = ChesapeakeDE(chesapeake_root, crs=train_naip.crs, res=train_naip.res, download=True)\\n\",\n \"\\n\",\n \"train_dataset = train_naip & chesapeake\\n\",\n \"test_dataset = test_naip & chesapeake\\n\",\n \"\\n\",\n \"sampler = RandomGeoSampler(train_naip, size=256, length=200)\\n\",\n \"dataloader = DataLoader(train_dataset, sampler=sampler, collate_fn=stack_samples, batch_size=32)\\n\",\n \"\\n\",\n \"testsampler = RandomGeoSampler(test_naip, size=256, length=8)\\n\",\n \"testdataloader = DataLoader(test_dataset, sampler=testsampler, collate_fn=stack_samples, 
batch_size=8, num_workers=4)\\n\",\n \"testdata = list(testdataloader)[0]\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"crit = CrossEntropyLoss()\\n\",\n \"\\n\",\n \"# Criterions are normally parsed to models at init in minerva.\\n\",\n \"fcn = FCN8ResNet18(crit, input_size=(4, 256, 256), n_classes=13).to(device)\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"# Optimisers need to be set to a model in minerva before training.\\n\",\n \"fcn.set_optimiser(opt)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" \\n\",\n \" # Uses MinervaModel.step.\\n\",\n \" loss, pred = fcn.step(image, target, train=True)\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"fcn = fcn_resnet50(num_classes=13).to(device)\\n\",\n \"fcn.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(device)\\n\",\n \"\\n\",\n \"crit = CrossEntropyLoss()\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \"\\n\",\n \" opt.zero_grad()\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \" loss = crit(pred, target)\\n\",\n \" loss.backward()\\n\",\n \" opt.step()\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"minerva-310\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n 
\"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.10.9 (main, Jan 11 2023, 15:21:40) [GCC 11.2.0]\"\n },\n \"orig_nbformat\": 4,\n \"vscode\": {\n \"interpreter\": {\n \"hash\": \"3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff\"\n }\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n" + }, + "sourceLanguage": "JupyterPython" + }, + "contextRegion": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 6620, + "snippet": { + "text": "{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"import os\\n\",\n \"import tempfile\\n\",\n \"from pathlib import Path\\n\",\n \"\\n\",\n \"from torch.utils.data import DataLoader\\n\",\n \"from torchvision.models.segmentation import fcn_resnet50\\n\",\n \"import torch.nn as nn\\n\",\n \"from torchgeo.datasets import NAIP, ChesapeakeDE, stack_samples\\n\",\n \"from torchgeo.datasets.utils import download_url\\n\",\n \"from torchgeo.samplers import RandomGeoSampler\\n\",\n \"from torch.nn import CrossEntropyLoss\\n\",\n \"from torch.optim import Adam\\n\",\n \"import torch\\n\",\n \"import numpy as np\\n\",\n \"import matplotlib.pyplot as plt\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from minerva.models import FCN8ResNet18\\n\",\n \"from minerva.utils.utils import get_cuda_device\\n\",\n \"\\n\",\n \"device = get_cuda_device(0)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"data_root = tempfile.gettempdir()\\n\",\n \"train_root = Path(data_root, \\\"naip\\\", \\\"train\\\")\\n\",\n \"test_root = Path(data_root, \\\"naip\\\", \\\"test\\\")\\n\",\n \"naip_url = \\\"https://naipeuwest.blob.core.windows.net/naip/v002/de/2018/de_060cm_2018/38075/\\\"\\n\",\n \"tiles = [\\n\",\n \" \\\"m_3807511_ne_18_060_20181104.tif\\\",\\n\",\n \" \\\"m_3807511_se_18_060_20181104.tif\\\",\\n\",\n \" \\\"m_3807512_nw_18_060_20180815.tif\\\",\\n\",\n \"]\\n\",\n \"\\n\",\n \"for tile in tiles:\\n\",\n \" download_url(naip_url + tile, train_root)\\n\",\n \"\\n\",\n \"download_url(naip_url + \\\"m_3807512_sw_18_060_20180815.tif\\\", test_root)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"train_naip = NAIP(train_root)\\n\",\n \"test_naip = NAIP(test_root)\\n\",\n \"\\n\",\n \"chesapeake_root = os.path.join(data_root, \\\"chesapeake\\\")\\n\",\n \"\\n\",\n \"chesapeake = ChesapeakeDE(chesapeake_root, crs=train_naip.crs, res=train_naip.res, download=True)\\n\",\n \"\\n\",\n \"train_dataset = train_naip & chesapeake\\n\",\n \"test_dataset = test_naip & chesapeake\\n\",\n \"\\n\",\n \"sampler = RandomGeoSampler(train_naip, size=256, length=200)\\n\",\n \"dataloader = DataLoader(train_dataset, sampler=sampler, collate_fn=stack_samples, batch_size=32)\\n\",\n \"\\n\",\n \"testsampler = RandomGeoSampler(test_naip, size=256, length=8)\\n\",\n \"testdataloader = DataLoader(test_dataset, sampler=testsampler, collate_fn=stack_samples, batch_size=8, num_workers=4)\\n\",\n \"testdata = list(testdataloader)[0]\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n 
\"source\": [\n \"crit = CrossEntropyLoss()\\n\",\n \"\\n\",\n \"# Criterions are normally parsed to models at init in minerva.\\n\",\n \"fcn = FCN8ResNet18(crit, input_size=(4, 256, 256), n_classes=13).to(device)\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"# Optimisers need to be set to a model in minerva before training.\\n\",\n \"fcn.set_optimiser(opt)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" \\n\",\n \" # Uses MinervaModel.step.\\n\",\n \" loss, pred = fcn.step(image, target, train=True)\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"fcn = fcn_resnet50(num_classes=13).to(device)\\n\",\n \"fcn.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(device)\\n\",\n \"\\n\",\n \"crit = CrossEntropyLoss()\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \"\\n\",\n \" opt.zero_grad()\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \" loss = crit(pred, target)\\n\",\n \" loss.backward()\\n\",\n \" opt.step()\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"minerva-310\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": 
\"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.10.9 (main, Jan 11 2023, 15:21:40) [GCC 11.2.0]\"\n },\n \"orig_nbformat\": 4,\n \"vscode\": {\n \"interpreter\": {\n \"hash\": \"3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff\"\n }\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + ], + "partialFingerprints": { + "equalIndicator/v1": "45b8b345ff27cb1fd271b8ce4b6451706fdd63ef543171cb22bed1bd47349c37" + }, + "properties": { + "ideaSeverity": "ERROR" + } + }, + { + "ruleId": "PyInterpreterInspection", + "kind": "fail", + "level": "error", + "message": { + "text": "No Python interpreter configured for the project", + "markdown": "No Python interpreter configured for the project" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "docs/conf.py", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 2969, + "snippet": { + "text": "# -*- coding: utf-8 -*-\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../minerva/\"))\n\nimport minerva # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"minerva\"\ncopyright = \"2023, Harry Baker\"\nauthor = minerva.__author__\nversion = minerva.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"myst_parser\",\n]\n\nsource_suffix = [\".rst\", \".md\"]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"typing\": (\"https://typing.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchgeo\": (\"https://torchgeo.readthedocs.io/en/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"pandas\": (\"https://pandas.pydata.org/docs/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/stable/\", None),\n \"pillow\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. 
function::).\nadd_module_names = False\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"source/_static\"]\n\nhtml_logo = \"images/Minerva_logo.png\"\n\nhtml_theme_options = {\n \"navigation_depth\": -1,\n}\n" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 2969, + "snippet": { + "text": "# -*- coding: utf-8 -*-\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../minerva/\"))\n\nimport minerva # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"minerva\"\ncopyright = \"2023, Harry Baker\"\nauthor = minerva.__author__\nversion = minerva.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"myst_parser\",\n]\n\nsource_suffix = [\".rst\", \".md\"]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"typing\": (\"https://typing.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchgeo\": (\"https://torchgeo.readthedocs.io/en/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"pandas\": (\"https://pandas.pydata.org/docs/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/stable/\", None),\n \"pillow\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. 
See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"source/_static\"]\n\nhtml_logo = \"images/Minerva_logo.png\"\n\nhtml_theme_options = {\n \"navigation_depth\": -1,\n}\n" } } }, "logicalLocations": [ { "fullyQualifiedName": "project", "kind": "module" } ] } ], "partialFingerprints": { "equalIndicator/v1": "26ac99e6d728f4fda3b5f3063201aabf4a891595b69a7574c156f56d5a10d2a7" }, "properties": { "ideaSeverity": "ERROR" } }, { "ruleId": "PyInterpreterInspection", "kind": "fail", "level": "error", "message": { "text": "No Python interpreter configured for the project", "markdown": "No Python interpreter configured for the project" }, "locations": [ { "physicalLocation": { "artifactLocation": { "uri": "minerva/__init__.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, "charLength": 2019, "snippet": { "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\nr\"\"\":mod:`minerva` is a package designed to facilitate the fitting and visualisation of models for geo-spatial research.\n\nThe main entry point to :mod:`minerva` is via :class:`Trainer`.\n >>> from minerva.utils import CONFIG # Module containing various utility functions.\n >>> from minerva.trainer import Trainer # Class designed to handle fitting of model.\n\nInitialise a Trainer. Also creates the model.\n >>> trainer = Trainer(**CONFIG)\n\nRun the fitting (train and validation epochs).\n >>> trainer.fit()\n\nRun the testing epoch and output results.\n >>> trainer.test()\n\n.. 
note::\n Includes two small ``.tiff`` excerpts from the ChesapeakeCVPR dataset used for testing.\n\n https://lila.science/datasets/chesapeakelandcover Credit for the data goes to:\n\n Robinson C, Hou L, Malkin K, Soobitsky R, Czawlytko J, Dilkina B, Jojic N.\n Large Scale High-Resolution Land Cover Mapping with Multi-Resolution Data.\n Proceedings of the 2019 Conference on Computer Vision and Pattern Recognition (CVPR 2019)\n\"\"\"\n\n__version__ = \"0.23.3\"\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n" }, "sourceLanguage": "Python" }, "contextRegion": { "startLine": 1, "startColumn": 1, "charOffset": 0, "charLength": 2019, "snippet": { "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\nr\"\"\":mod:`minerva` is a package designed to facilitate the fitting and visualisation of models for geo-spatial research.\n\nThe main entry point to :mod:`minerva` is via :class:`Trainer`.\n >>> from minerva.utils import CONFIG # Module containing various utility functions.\n >>> from minerva.trainer import Trainer # Class designed to handle fitting of model.\n\nInitialise a Trainer. Also creates the model.\n >>> trainer = Trainer(**CONFIG)\n\nRun the fitting (train and validation epochs).\n >>> trainer.fit()\n\nRun the testing epoch and output results.\n >>> trainer.test()\n\n.. 
note::\n Includes two small ``.tiff`` excerpts from the ChesapeakeCVPR dataset used for testing.\n\n https://lila.science/datasets/chesapeakelandcover Credit for the data goes to:\n\n Robinson C, Hou L, Malkin K, Soobitsky R, Czawlytko J, Dilkina B, Jojic N.\n Large Scale High-Resolution Land Cover Mapping with Multi-Resolution Data.\n Proceedings of the 2019 Conference on Computer Vision and Pattern Recognition (CVPR 2019)\n\"\"\"\n\n__version__ = \"0.23.3\"\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n" } } }, "logicalLocations": [ { "fullyQualifiedName": "project", "kind": "module" } ] } ], "partialFingerprints": { "equalIndicator/v1": "f27e75f6433d487c58e6e1c86b586733e88e3e56da3eacc502f45af6fceac12c" }, "properties": { "ideaSeverity": "ERROR" } }, { "ruleId": "PyInterpreterInspection", "kind": "fail", "level": "error", "message": { "text": "No Python interpreter configured for the project", "markdown": "No Python interpreter configured for the project" }, "locations": [ { "physicalLocation": { "artifactLocation": { "uri": "scripts/Torch_to_ONNX.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, "charLength": 2990, "snippet": { "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Converts :mod:`torch` model weights to ``ONNX`` format.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, universal_path\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu, rank=args.rank, world_size=args.world_size, verbose=False, **CONFIG\n )\n\n weights_path = universal_path(CONFIG[\"dir\"][\"cache\"]) / CONFIG[\"pre_train_name\"]\n\n trainer.save_model(fn=weights_path, fmt=\"onnx\")\n\n print(f\"Model saved to --> {weights_path}.onnx\")\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 2990, + "snippet": { + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Converts :mod:`torch` model weights to ``ONNX`` format.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, universal_path\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu, rank=args.rank, world_size=args.world_size, verbose=False, **CONFIG\n )\n\n weights_path = universal_path(CONFIG[\"dir\"][\"cache\"]) / CONFIG[\"pre_train_name\"]\n\n trainer.save_model(fn=weights_path, fmt=\"onnx\")\n\n print(f\"Model saved to --> {weights_path}.onnx\")\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + ], + "partialFingerprints": { + "equalIndicator/v1": "be3acc87618ecbe003b13a579c194dc0832ae04e3e63df927c6808bf9d2ccc73" + }, + "properties": { + "ideaSeverity": "ERROR" + } + }, + { + "ruleId": "PyInterpreterInspection", + "kind": "fail", + "level": "error", + "message": { + "text": "No Python interpreter configured for the project", + "markdown": "No Python interpreter configured for the project" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "minerva/utils/__init__.py", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 3957, + "snippet": { + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial 
portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Utility functionality, visualisation and configuration for :mod:`minerva`.\n\nAttributes:\n CONFIG_NAME (str): Name of the config to be used in the experiment.\n CONFIG_PATH (str): Path to the config.\n MASTER_PARSER (~argparse.ArgumentParser): Argparser for the CLI for the config loading.\n CONFIG (dict[str, Any]): The master config loaded by :mod:`config_load`.\n AUX_CONFIGS (dict[str, Any]): Dictionary containing the auxilary configs loaded by :mod:`config_load`.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"universal_path\",\n \"CONFIG_NAME\",\n \"CONFIG_PATH\",\n \"MASTER_PARSER\",\n \"CONFIG\",\n \"AUX_CONFIGS\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom minerva.utils.config_load import check_paths, load_configs\nfrom minerva.utils.config_load import universal_path as universal_path # noqa: F401\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Objects to hold the config name and path.\nCONFIG_NAME: Optional[str]\nCONFIG_PATH: Optional[Path]\n\nMASTER_PARSER = argparse.ArgumentParser(add_help=False)\nMASTER_PARSER.add_argument(\n \"-c\",\n \"--config\",\n type=str,\n help=\"Path to the config file defining experiment\",\n)\nMASTER_PARSER.add_argument(\n \"--use-default-conf-dir\",\n dest=\"use_default_conf_dir\",\n action=\"store_true\",\n help=\"Set config path to default\",\n)\n_args, _ = MASTER_PARSER.parse_known_args()\n\n# Store the current working directory (i.e where script is being run from).\n_cwd = os.getcwd()\n\n_path, CONFIG_NAME, CONFIG_PATH = check_paths(_args.config, _args.use_default_conf_dir)\n\n# Loads the configs from file using paths found in sys.args.\nCONFIG, AUX_CONFIGS = load_configs(_path)\n\n# Change the working directory back to script location.\nos.chdir(_cwd)\n" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 3957, + "snippet": { + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person 
obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Utility functionality, visualisation and configuration for :mod:`minerva`.\n\nAttributes:\n CONFIG_NAME (str): Name of the config to be used in the experiment.\n CONFIG_PATH (str): Path to the config.\n MASTER_PARSER (~argparse.ArgumentParser): Argparser for the CLI for the config loading.\n CONFIG (dict[str, Any]): The master config loaded by :mod:`config_load`.\n AUX_CONFIGS (dict[str, Any]): Dictionary containing the auxilary configs loaded by :mod:`config_load`.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"universal_path\",\n \"CONFIG_NAME\",\n \"CONFIG_PATH\",\n \"MASTER_PARSER\",\n \"CONFIG\",\n \"AUX_CONFIGS\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom minerva.utils.config_load import check_paths, load_configs\nfrom minerva.utils.config_load import universal_path as universal_path # noqa: F401\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Objects to hold the config name and path.\nCONFIG_NAME: Optional[str]\nCONFIG_PATH: Optional[Path]\n\nMASTER_PARSER = argparse.ArgumentParser(add_help=False)\nMASTER_PARSER.add_argument(\n \"-c\",\n \"--config\",\n type=str,\n help=\"Path to the config file defining experiment\",\n)\nMASTER_PARSER.add_argument(\n \"--use-default-conf-dir\",\n dest=\"use_default_conf_dir\",\n action=\"store_true\",\n help=\"Set config path to default\",\n)\n_args, _ = MASTER_PARSER.parse_known_args()\n\n# Store the current working directory (i.e where script is being run from).\n_cwd = os.getcwd()\n\n_path, CONFIG_NAME, CONFIG_PATH = check_paths(_args.config, 
_args.use_default_conf_dir)\n\n# Loads the configs from file using paths found in sys.args.\nCONFIG, AUX_CONFIGS = load_configs(_path)\n\n# Change the working directory back to script location.\nos.chdir(_cwd)\n" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + ], + "partialFingerprints": { + "equalIndicator/v1": "432b483a685d69f1cf90c4a51a5aa334df1ae3a50949576ed41fbc0047dc48ce" + }, + "properties": { + "ideaSeverity": "ERROR" + } + }, + { + "ruleId": "PyInterpreterInspection", + "kind": "fail", + "level": "error", + "message": { + "text": "No Python interpreter configured for the project", + "markdown": "No Python interpreter configured for the project" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "scripts/MinervaPipe.py", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 3215, + "snippet": { + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to handle the pre-training of model and its subsequent downstream task fine-tuning.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport subprocess\nimport sys\nfrom typing import Any, Dict\n\nimport yaml\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(config_path: str):\n with open(config_path) as f:\n config: Dict[str, Any] = yaml.safe_load(f)\n\n for key in config.keys():\n print(\n f\"\\nExecuting {key} experiment + =====================================================================\"\n )\n\n try:\n exit_code = subprocess.Popen( # nosec B602\n f\"python MinervaExp.py -c {config[key]}\",\n shell=True,\n ).wait()\n\n if exit_code != 0:\n raise SystemExit()\n except KeyboardInterrupt as err:\n print(f\"{err}: Skipping to next experiment...\")\n\n except SystemExit as err:\n print(err)\n print(f\"Error in {key} experiment -> ABORT\")\n sys.exit(exit_code) # type: 
ignore\n\n print(\n f\"\\n{key} experiment COMPLETE + =====================================================================\"\n )\n\n print(\"\\nPipeline COMPLETE\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config_path\", type=str)\n args = parser.parse_args()\n\n main(config_path=args.config_path)\n" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 1, + "startColumn": 1, + "charOffset": 0, + "charLength": 3215, + "snippet": { + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to handle the pre-training of model and its subsequent downstream task fine-tuning.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport subprocess\nimport sys\nfrom typing import Any, Dict\n\nimport yaml\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(config_path: str):\n with open(config_path) as f:\n config: Dict[str, Any] = yaml.safe_load(f)\n\n for key in config.keys():\n print(\n f\"\\nExecuting {key} experiment + =====================================================================\"\n )\n\n try:\n exit_code = subprocess.Popen( # nosec B602\n f\"python MinervaExp.py -c {config[key]}\",\n shell=True,\n ).wait()\n\n if exit_code != 0:\n raise SystemExit()\n except KeyboardInterrupt as err:\n print(f\"{err}: Skipping to next experiment...\")\n\n except SystemExit as err:\n print(err)\n print(f\"Error in {key} experiment -> ABORT\")\n sys.exit(exit_code) # type: ignore\n\n print(\n f\"\\n{key} experiment COMPLETE + =====================================================================\"\n )\n\n print(\"\\nPipeline COMPLETE\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config_path\", type=str)\n args = parser.parse_args()\n\n main(config_path=args.config_path)\n" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + 
], + "partialFingerprints": { + "equalIndicator/v1": "5759e3466baa71903baffed4c3f49cd277d5e701b966c2512c50d4c6f41af28f" + }, + "properties": { + "ideaSeverity": "ERROR" + } + }, + { + "ruleId": "PyInterpreterInspection", + "kind": "fail", + "level": "error", + "message": { + "text": "No Python interpreter configured for the project", + "markdown": "No Python interpreter configured for the project" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "scripts/MinervaClusterVis.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 93, + "charLength": 2996, "snippet": { - "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nif __name__ == \"__main__\":\n setup()\n" + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Adaptation of ``MinervaExp.py`` for cluster visualisation of a model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(gpu=gpu, rank=args.rank, world_size=args.world_size, **CONFIG)\n\n trainer.tsne_cluster()\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed 
computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" }, "sourceLanguage": "Python" }, @@ -15912,9 +16265,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 93, + "charLength": 2996, "snippet": { - "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nif __name__ == \"__main__\":\n setup()\n" + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Adaptation of ``MinervaExp.py`` for cluster visualisation of a model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(gpu=gpu, rank=args.rank, world_size=args.world_size, **CONFIG)\n\n trainer.tsne_cluster()\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" } } }, @@ -15927,7 +16280,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "2f6c5c61654fc86a004631f623f21bb30341adf9fbc6b205182834835f2cc226" + "equalIndicator/v1": "9db4c0574404cf485f77f83d8dd863cb75fb99638d421a6dcefbee479875cd5e" }, "properties": { "ideaSeverity": "ERROR" @@ -15995,16 +16348,16 @@ { "physicalLocation": { 
"artifactLocation": { - "uri": "minerva/__init__.py", + "uri": "scripts/MinervaExp.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 2019, + "charLength": 3546, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\nr\"\"\":mod:`minerva` is a package designed to facilitate the fitting and visualation of models for geo-spatial research.\n\nTo main entry point to :mod:`minerva` is via :class:`Trainer`.\n >>> from minerva.utils import CONFIG # Module containing various utility functions.\n >>> from minerva.trainer import Trainer # Class designed to handle fitting of model.\n\nInitialise a Trainer. Also creates the model.\n >>> trainer = Trainer(**CONFIG)\n\nRun the fitting (train and validation epochs).\n >>> trainer.fit()\n\nRun the testing epoch and output results.\n >>> trainer.test()\n\n.. note::\n Includes two small ``.tiff`` exercpts from the ChesapeakeCVPR dataset used for testing.\n\n https://lila.science/datasets/chesapeakelandcover Credit for the data goes to:\n\n Robinson C, Hou L, Malkin K, Soobitsky R, Czawlytko J, Dilkina B, Jojic N.\n Large Scale High-Resolution Land Cover Mapping with Multi-Resolution Data.\n Proceedings of the 2019 Conference on Computer Vision and Pattern Recognition (CVPR 2019)\n\"\"\"\n\n__version__ = \"0.23.2\"\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n" + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to execute the creation, fitting and testing of a computer vision neural network model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n\n# TODO: Add ability to conduct hyper-parameter iterative variation experimentation.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nimport argcomplete\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu,\n rank=args.rank,\n world_size=args.world_size,\n wandb_run=args.wandb_run,\n **CONFIG,\n )\n\n if not CONFIG.get(\"eval\", False):\n trainer.fit()\n\n if CONFIG.get(\"pre_train\", False) and gpu == 0:\n trainer.save_backbone()\n trainer.close()\n\n if not CONFIG.get(\"pre_train\", False):\n trainer.test()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n argcomplete.autocomplete(parser)\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Print Minerva banner.\n utils._print_banner()\n\n with runner.WandbConnectionManager():\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" }, "sourceLanguage": "Python" }, @@ -16012,9 +16365,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 2019, + "charLength": 3546, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\nr\"\"\":mod:`minerva` is a package designed to facilitate the fitting and visualation of models for geo-spatial research.\n\nTo main entry point to :mod:`minerva` is via :class:`Trainer`.\n >>> from minerva.utils import CONFIG # Module containing various utility functions.\n >>> from minerva.trainer import Trainer # Class designed to handle fitting of model.\n\nInitialise a Trainer. Also creates the model.\n >>> trainer = Trainer(**CONFIG)\n\nRun the fitting (train and validation epochs).\n >>> trainer.fit()\n\nRun the testing epoch and output results.\n >>> trainer.test()\n\n.. note::\n Includes two small ``.tiff`` exercpts from the ChesapeakeCVPR dataset used for testing.\n\n https://lila.science/datasets/chesapeakelandcover Credit for the data goes to:\n\n Robinson C, Hou L, Malkin K, Soobitsky R, Czawlytko J, Dilkina B, Jojic N.\n Large Scale High-Resolution Land Cover Mapping with Multi-Resolution Data.\n Proceedings of the 2019 Conference on Computer Vision and Pattern Recognition (CVPR 2019)\n\"\"\"\n\n__version__ = \"0.23.2\"\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n" + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to execute the creation, fitting and testing of a computer vision neural network model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n\n# TODO: Add ability to conduct hyper-parameter iterative variation experimentation.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nimport argcomplete\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu,\n rank=args.rank,\n world_size=args.world_size,\n wandb_run=args.wandb_run,\n **CONFIG,\n )\n\n if not CONFIG.get(\"eval\", False):\n trainer.fit()\n\n if CONFIG.get(\"pre_train\", False) and gpu == 0:\n trainer.save_backbone()\n trainer.close()\n\n if not CONFIG.get(\"pre_train\", False):\n trainer.test()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n argcomplete.autocomplete(parser)\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Print Minerva banner.\n utils._print_banner()\n\n with runner.WandbConnectionManager():\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" } } }, @@ -16027,7 +16380,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "6d72415a2205d157eda780cd56456cb689408cda33abe99d371cbf7180f903e2" + "equalIndicator/v1": "a7e7273e1f651dd7466ec371f423f30a9a9c3d0140801a9d5b2e0f3dc027597e" }, "properties": { "ideaSeverity": "ERROR" @@ -16045,16 +16398,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "docs/conf.py", + "uri": "minerva/optimisers.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 2969, + "charLength": 6249, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. 
For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../minerva/\"))\n\nimport minerva # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"minerva\"\ncopyright = \"2023, Harry Baker\"\nauthor = minerva.__author__\nversion = minerva.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"myst_parser\",\n]\n\nsource_suffix = [\".rst\", \".md\"]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"typing\": (\"https://typing.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchgeo\": (\"https://torchgeo.readthedocs.io/en/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"pandas\": (\"https://pandas.pydata.org/docs/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/stable/\", None),\n \"pillow\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. 
They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"source/_static\"]\n\nhtml_logo = \"images/Minerva_logo.png\"\n\nhtml_theme_options = {\n \"navigation_depth\": -1,\n}\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n#\n# Copyright (c) 2018 Noah Golmant\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Custom :mod:`torch` optimisers.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Noah Golmant\"\n__license__ = \"MIT License\"\n__copyright__ = \"Copyright (c) 2018 Noah Golmant\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Callable, Dict, Iterable, Optional, Union\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass LARS(Optimizer):\n r\"\"\"Implements layer-wise adaptive rate scaling for SGD.\n\n Source: https://github.com/noahgolmant/pytorch-lars/blob/master/lars.py\n\n Args:\n params (~typing.Iterable | dict): Iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): base learning rate (\\gamma_0)\n momentum (float, optional): momentum factor (default: 0) (\"m\")\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n (\"\\beta\")\n eta (float, optional): LARS coefficient\n max_epoch (int): maximum training epoch to determine polynomial LR decay.\n\n Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.\n Large Batch Training of Convolutional Networks: https://arxiv.org/abs/1708.03888\n\n Example:\n >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n \"\"\"\n\n def __init__(\n self,\n params: Union[Iterable[Any], Dict[Any, Any]],\n lr: float,\n momentum: float = 0.9,\n 
weight_decay: float = 0.0005,\n eta: float = 0.001,\n max_epoch: int = 200,\n ):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n if eta < 0.0:\n raise ValueError(\"Invalid LARS coefficient value: {}\".format(eta))\n\n self.epoch = 0\n defaults = dict(\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n eta=eta,\n max_epoch=max_epoch,\n )\n super(LARS, self).__init__(params, defaults)\n\n def step( # type: ignore[override]\n self, epoch: Optional[int] = None, closure: Optional[Callable[..., Any]] = None\n ):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n epoch (int, optioanl): Current epoch to calculate polynomial LR decay schedule.\n if None, uses self.epoch and increments it.\n \"\"\"\n loss = None\n if closure is not None: # pragma: no cover\n loss = closure()\n\n if epoch is None:\n epoch = self.epoch\n self.epoch += 1\n\n for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n eta = group[\"eta\"]\n lr = group[\"lr\"]\n max_epoch = group[\"max_epoch\"]\n\n for p in group[\"params\"]:\n if p.grad is None: # pragma: no cover\n continue\n\n param_state = self.state[p]\n d_p = p.grad.data\n\n weight_norm = torch.linalg.norm(p.data)\n grad_norm = torch.linalg.norm(d_p)\n\n # Global LR computed on polynomial decay schedule\n decay = (1 - float(epoch) / max_epoch) ** 2\n global_lr = lr * decay\n\n # Compute local learning rate for this layer\n local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm)\n\n # Update the momentum term\n actual_lr = local_lr * global_lr\n\n if \"momentum_buffer\" not in param_state:\n buf = param_state[\"momentum_buffer\"] = torch.zeros_like(p.data) # type: ignore[attr-defined]\n else:\n buf = param_state[\"momentum_buffer\"]\n buf.mul_(momentum).add_(d_p + weight_decay * p.data, alpha=actual_lr)\n p.data.add_(-buf)\n\n return loss\n" }, "sourceLanguage": "Python" }, @@ -16062,9 +16415,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 2969, + "charLength": 6249, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Configuration file for the Sphinx documentation builder.\n#\n# This file only contains a selection of the most common options. For a full\n# list see the documentation:\n# https://www.sphinx-doc.org/en/master/usage/configuration.html\n\n# -- Path setup --------------------------------------------------------------\n\n# If extensions (or modules to document with autodoc) are in another directory,\n# add these directories to sys.path here. If the directory is relative to the\n# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../minerva/\"))\n\nimport minerva # noqa: E402\n\n# -- Project information -----------------------------------------------------\n\nproject = \"minerva\"\ncopyright = \"2023, Harry Baker\"\nauthor = minerva.__author__\nversion = minerva.__version__\n\n\n# -- General configuration ---------------------------------------------------\n\n# Add any Sphinx extension module names here, as strings. 
They can be\n# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom\n# ones.\nextensions = [\n \"sphinx.ext.napoleon\",\n \"sphinx.ext.autodoc\",\n \"sphinx.ext.intersphinx\",\n \"myst_parser\",\n]\n\nsource_suffix = [\".rst\", \".md\"]\n\nintersphinx_mapping = {\n \"python\": (\"https://docs.python.org/3\", None),\n \"typing\": (\"https://typing.readthedocs.io/en/latest/\", None),\n \"torch\": (\"https://pytorch.org/docs/stable/\", None),\n \"torchgeo\": (\"https://torchgeo.readthedocs.io/en/stable/\", None),\n \"torchvision\": (\"https://pytorch.org/vision/stable/\", None),\n \"matplotlib\": (\"https://matplotlib.org/stable/\", None),\n \"pandas\": (\"https://pandas.pydata.org/docs/\", None),\n \"numpy\": (\"https://numpy.org/doc/stable/\", None),\n \"rasterio\": (\"https://rasterio.readthedocs.io/en/stable/\", None),\n \"pillow\": (\"https://pillow.readthedocs.io/en/stable/\", None),\n}\n\n# Add any paths that contain templates here, relative to this directory.\ntemplates_path = [\"_templates\"]\n\n# List of patterns, relative to source directory, that match files and\n# directories to ignore when looking for source files.\n# This pattern also affects html_static_path and html_extra_path.\nexclude_patterns = [\"_build\", \"Thumbs.db\", \".DS_Store\"]\n\n# If true, the current module name will be prepended to all description\n# unit titles (such as .. function::).\nadd_module_names = False\n\n# -- Options for HTML output -------------------------------------------------\n\n# The theme to use for HTML and HTML Help pages. See the documentation for\n# a list of builtin themes.\n#\nhtml_theme = \"sphinx_rtd_theme\"\n\n# Add any paths that contain custom static files (such as style sheets) here,\n# relative to this directory. They are copied after the builtin static files,\n# so a file named \"default.css\" will overwrite the builtin \"default.css\".\nhtml_static_path = [\"source/_static\"]\n\nhtml_logo = \"images/Minerva_logo.png\"\n\nhtml_theme_options = {\n \"navigation_depth\": -1,\n}\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n#\n# Copyright (c) 2018 Noah Golmant\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Custom :mod:`torch` optimisers.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Noah Golmant\"\n__license__ = \"MIT License\"\n__copyright__ = \"Copyright (c) 2018 Noah Golmant\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Callable, Dict, Iterable, Optional, Union\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass LARS(Optimizer):\n r\"\"\"Implements layer-wise adaptive rate scaling for SGD.\n\n Source: https://github.com/noahgolmant/pytorch-lars/blob/master/lars.py\n\n Args:\n params (~typing.Iterable | dict): Iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): base learning rate (\\gamma_0)\n momentum (float, optional): momentum factor (default: 0) (\"m\")\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n (\"\\beta\")\n eta (float, optional): LARS coefficient\n max_epoch (int): maximum training epoch to determine polynomial LR decay.\n\n Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.\n Large Batch Training of Convolutional Networks: https://arxiv.org/abs/1708.03888\n\n Example:\n >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n \"\"\"\n\n def __init__(\n self,\n params: Union[Iterable[Any], Dict[Any, Any]],\n lr: float,\n momentum: float = 0.9,\n weight_decay: float = 0.0005,\n eta: float = 0.001,\n max_epoch: int = 200,\n ):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n if eta < 0.0:\n raise ValueError(\"Invalid LARS coefficient value: {}\".format(eta))\n\n self.epoch = 0\n defaults = dict(\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n eta=eta,\n max_epoch=max_epoch,\n )\n super(LARS, self).__init__(params, defaults)\n\n def step( # type: ignore[override]\n self, epoch: Optional[int] = None, closure: Optional[Callable[..., Any]] = None\n ):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n epoch (int, optioanl): Current epoch to calculate polynomial LR decay schedule.\n if None, uses self.epoch and increments it.\n \"\"\"\n loss = None\n if closure is not None: # pragma: no cover\n loss = closure()\n\n if epoch is None:\n epoch = self.epoch\n self.epoch += 1\n\n 
for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n eta = group[\"eta\"]\n lr = group[\"lr\"]\n max_epoch = group[\"max_epoch\"]\n\n for p in group[\"params\"]:\n if p.grad is None: # pragma: no cover\n continue\n\n param_state = self.state[p]\n d_p = p.grad.data\n\n weight_norm = torch.linalg.norm(p.data)\n grad_norm = torch.linalg.norm(d_p)\n\n # Global LR computed on polynomial decay schedule\n decay = (1 - float(epoch) / max_epoch) ** 2\n global_lr = lr * decay\n\n # Compute local learning rate for this layer\n local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm)\n\n # Update the momentum term\n actual_lr = local_lr * global_lr\n\n if \"momentum_buffer\" not in param_state:\n buf = param_state[\"momentum_buffer\"] = torch.zeros_like(p.data) # type: ignore[attr-defined]\n else:\n buf = param_state[\"momentum_buffer\"]\n buf.mul_(momentum).add_(d_p + weight_decay * p.data, alpha=actual_lr)\n p.data.add_(-buf)\n\n return loss\n" } } }, @@ -16077,7 +16430,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "26ac99e6d728f4fda3b5f3063201aabf4a891595b69a7574c156f56d5a10d2a7" + "equalIndicator/v1": "3693ba3b9ce8e84975528bf13a0c3cf93fc575823b2cf7b7c18ab1cc4daa8aee" }, "properties": { "ideaSeverity": "ERROR" @@ -16095,16 +16448,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/utils/config_load.py", + "uri": "setup.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 8295, + "charLength": 93, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Handles the loading of config files and checking paths.\n\nAttributes:\n DEFAULT_CONF_DIR_PATH (~pathlib.Path): Path to the default config directory.\n DEFAULT_CONFIG_NAME (str): Name of the default, example config.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"DEFAULT_CONF_DIR_PATH\",\n \"DEFAULT_CONFIG_NAME\",\n \"ToDefaultConfDir\",\n \"universal_path\",\n \"check_paths\",\n \"chdir_to_default\",\n \"load_configs\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nimport yaml\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Default values for the path to the config directory and config name.\nDEFAULT_CONF_DIR_PATH = Path(\"../../inbuilt_cfgs/\")\nDEFAULT_CONFIG_NAME: str = \"example_config.yml\"\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass ToDefaultConfDir:\n \"\"\"Changes to the default config directory. 
Switches back to the previous CWD on close.\"\"\"\n\n def __init__(self) -> None:\n self._cwd = os.getcwd()\n self._def_dir = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n\n def __enter__(self) -> None:\n os.chdir(self._def_dir)\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.chdir(self._cwd)\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef universal_path(path: Any) -> Path:\n \"\"\"Creates a :class:`~pathlib.Path` object from :class:`str` or :class:`~typing.Iterable` inputs.\n\n Args:\n path (~typing.Any): Representation of a path to convert to :class:`~pathlib.Path` object.\n\n Returns:\n ~pathlib.Path: :class:`~pathlib.Path` object of the input ``path``.\n \"\"\"\n if isinstance(path, Path):\n return path\n elif type(path) == str:\n return Path(path)\n else:\n return Path(*path)\n\n\ndef check_paths(\n config: Optional[Union[str, Path]] = None, use_default_conf_dir: bool = False\n) -> Tuple[str, Optional[str], Optional[Path]]:\n \"\"\"Checks the path given for the config.\n\n Args:\n config (str | ~pathlib.Path | None): Path to the config given from the CLI.\n use_default_conf_dir (bool): Assumes that ``config`` is in the default config directory if ``True``.\n\n Returns:\n tuple[str, ~typing.Optional[str], ~typing.Optional[~pathlib.Path]]: Tuple of the path for\n :func:`load_configs` to use, the config name and path to config.\n \"\"\"\n\n config_name: Optional[str] = None\n config_path: Optional[Path] = None\n\n if config is not None:\n p = Path(config)\n head = p.parent\n tail = p.name\n\n if str(head) != \"\" or str(head) is not None:\n config_path = head\n elif str(head) == \"\" or head is None:\n config_path = Path(\"\")\n\n config_name = tail\n\n # Overwrites the config path if option found in args regardless of -c args.\n if use_default_conf_dir:\n if config_path is not None:\n print(\n \"Warning: Config path specified with `--default_config_dir` option.\"\n + \"\\nDefault config directory path will be used.\"\n )\n config_path = None\n\n # If no config_path, change directory to the default config directory.\n if config_path is None:\n config_name = chdir_to_default(config_name)\n\n # Check the config specified exists at the path given. If not, assume its in the default directory.\n else:\n if config_name is None or not (config_path / config_name).exists():\n config_name = chdir_to_default(config_name)\n else:\n pass\n\n path = config_name\n if config_path is not None and config_path != Path(\"\"):\n path = str(config_path / config_name)\n\n return path, config_name, config_path\n\n\ndef chdir_to_default(config_name: Optional[str] = None) -> str:\n \"\"\"Changes the current working directory to the default config directory.\n\n Args:\n config_name (str): Optional; Name of the config in the default directory. Defaults to None.\n\n Returns:\n str: :data:`DEFAULT_CONFIG_NAME` if ``config_name`` not in default directory. ``config_name`` if it does exist.\n \"\"\"\n\n this_abs_path = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n os.chdir(this_abs_path)\n\n if config_name is None or not Path(config_name).exists():\n return DEFAULT_CONFIG_NAME\n else:\n return config_name\n\n\ndef load_configs(master_config_path: Union[str, Path]) -> Tuple[Dict[str, Any], ...]:\n \"\"\"Loads the master config from ``YAML``. 
Finds other config paths within and loads them.\n\n Args:\n master_config_path (str): Path to the master config ``YAML`` file.\n\n Returns:\n tuple[dict[str, ~typing.Any], ...]: Master config and any other configs found from paths in the master config.\n \"\"\"\n\n def yaml_load(path: Union[str, Path]) -> Any:\n \"\"\"Loads ``YAML`` file from path as dict.\n Args:\n path(str | ~pathlib.Path): Path to ``YAML`` file.\n\n Returns:\n yml_file (dict): YAML file loaded as dict.\n \"\"\"\n with open(path) as f:\n return yaml.safe_load(f)\n\n def aux_config_load(paths: Dict[str, str]) -> Dict[str, Dict[str, Any]]:\n \"\"\"Loads and returns config files from YAML as dicts.\n\n Args:\n paths (dict[str, str]): Dictionary mapping config names to paths to their ``YAML`` files.\n\n Returns:\n dict[str, dict[str, ~typing.Any]]: Config dictionaries loaded from ``YAML`` from paths.\n \"\"\"\n configs = {}\n for _config_name in paths.keys():\n # Loads config from YAML as dict.\n configs[_config_name] = yaml_load(paths[_config_name])\n return configs\n\n # First loads the master config.\n master_config = yaml_load(master_config_path)\n\n # Gets the paths for the other configs from master config.\n config_paths = master_config[\"dir\"][\"configs\"]\n\n # Loads and returns the other configs along with master config.\n return master_config, aux_config_load(config_paths)\n" + "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nif __name__ == \"__main__\":\n setup()\n" }, "sourceLanguage": "Python" }, @@ -16112,9 +16465,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 8295, + "charLength": 93, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Handles the loading of config files and checking paths.\n\nAttributes:\n DEFAULT_CONF_DIR_PATH (~pathlib.Path): Path to the default config directory.\n DEFAULT_CONFIG_NAME (str): Name of the default, example config.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"DEFAULT_CONF_DIR_PATH\",\n \"DEFAULT_CONFIG_NAME\",\n \"ToDefaultConfDir\",\n \"universal_path\",\n \"check_paths\",\n \"chdir_to_default\",\n \"load_configs\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nimport yaml\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Default values for the path to the config directory and config name.\nDEFAULT_CONF_DIR_PATH = Path(\"../../inbuilt_cfgs/\")\nDEFAULT_CONFIG_NAME: str = \"example_config.yml\"\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass ToDefaultConfDir:\n \"\"\"Changes to the default config directory. 
Switches back to the previous CWD on close.\"\"\"\n\n def __init__(self) -> None:\n self._cwd = os.getcwd()\n self._def_dir = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n\n def __enter__(self) -> None:\n os.chdir(self._def_dir)\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.chdir(self._cwd)\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef universal_path(path: Any) -> Path:\n \"\"\"Creates a :class:`~pathlib.Path` object from :class:`str` or :class:`~typing.Iterable` inputs.\n\n Args:\n path (~typing.Any): Representation of a path to convert to :class:`~pathlib.Path` object.\n\n Returns:\n ~pathlib.Path: :class:`~pathlib.Path` object of the input ``path``.\n \"\"\"\n if isinstance(path, Path):\n return path\n elif type(path) == str:\n return Path(path)\n else:\n return Path(*path)\n\n\ndef check_paths(\n config: Optional[Union[str, Path]] = None, use_default_conf_dir: bool = False\n) -> Tuple[str, Optional[str], Optional[Path]]:\n \"\"\"Checks the path given for the config.\n\n Args:\n config (str | ~pathlib.Path | None): Path to the config given from the CLI.\n use_default_conf_dir (bool): Assumes that ``config`` is in the default config directory if ``True``.\n\n Returns:\n tuple[str, ~typing.Optional[str], ~typing.Optional[~pathlib.Path]]: Tuple of the path for\n :func:`load_configs` to use, the config name and path to config.\n \"\"\"\n\n config_name: Optional[str] = None\n config_path: Optional[Path] = None\n\n if config is not None:\n p = Path(config)\n head = p.parent\n tail = p.name\n\n if str(head) != \"\" or str(head) is not None:\n config_path = head\n elif str(head) == \"\" or head is None:\n config_path = Path(\"\")\n\n config_name = tail\n\n # Overwrites the config path if option found in args regardless of -c args.\n if use_default_conf_dir:\n if config_path is not None:\n print(\n \"Warning: Config path specified with `--default_config_dir` option.\"\n + \"\\nDefault config directory path will be used.\"\n )\n config_path = None\n\n # If no config_path, change directory to the default config directory.\n if config_path is None:\n config_name = chdir_to_default(config_name)\n\n # Check the config specified exists at the path given. If not, assume its in the default directory.\n else:\n if config_name is None or not (config_path / config_name).exists():\n config_name = chdir_to_default(config_name)\n else:\n pass\n\n path = config_name\n if config_path is not None and config_path != Path(\"\"):\n path = str(config_path / config_name)\n\n return path, config_name, config_path\n\n\ndef chdir_to_default(config_name: Optional[str] = None) -> str:\n \"\"\"Changes the current working directory to the default config directory.\n\n Args:\n config_name (str): Optional; Name of the config in the default directory. Defaults to None.\n\n Returns:\n str: :data:`DEFAULT_CONFIG_NAME` if ``config_name`` not in default directory. ``config_name`` if it does exist.\n \"\"\"\n\n this_abs_path = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n os.chdir(this_abs_path)\n\n if config_name is None or not Path(config_name).exists():\n return DEFAULT_CONFIG_NAME\n else:\n return config_name\n\n\ndef load_configs(master_config_path: Union[str, Path]) -> Tuple[Dict[str, Any], ...]:\n \"\"\"Loads the master config from ``YAML``. 
Finds other config paths within and loads them.\n\n Args:\n master_config_path (str): Path to the master config ``YAML`` file.\n\n Returns:\n tuple[dict[str, ~typing.Any], ...]: Master config and any other configs found from paths in the master config.\n \"\"\"\n\n def yaml_load(path: Union[str, Path]) -> Any:\n \"\"\"Loads ``YAML`` file from path as dict.\n Args:\n path(str | ~pathlib.Path): Path to ``YAML`` file.\n\n Returns:\n yml_file (dict): YAML file loaded as dict.\n \"\"\"\n with open(path) as f:\n return yaml.safe_load(f)\n\n def aux_config_load(paths: Dict[str, str]) -> Dict[str, Dict[str, Any]]:\n \"\"\"Loads and returns config files from YAML as dicts.\n\n Args:\n paths (dict[str, str]): Dictionary mapping config names to paths to their ``YAML`` files.\n\n Returns:\n dict[str, dict[str, ~typing.Any]]: Config dictionaries loaded from ``YAML`` from paths.\n \"\"\"\n configs = {}\n for _config_name in paths.keys():\n # Loads config from YAML as dict.\n configs[_config_name] = yaml_load(paths[_config_name])\n return configs\n\n # First loads the master config.\n master_config = yaml_load(master_config_path)\n\n # Gets the paths for the other configs from master config.\n config_paths = master_config[\"dir\"][\"configs\"]\n\n # Loads and returns the other configs along with master config.\n return master_config, aux_config_load(config_paths)\n" + "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nif __name__ == \"__main__\":\n setup()\n" } } }, @@ -16127,7 +16480,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "71ccdb93e679527bea003deb6c3feb1e8ad8ba222092396293a1e714ff66c2ab" + "equalIndicator/v1": "2f6c5c61654fc86a004631f623f21bb30341adf9fbc6b205182834835f2cc226" }, "properties": { "ideaSeverity": "ERROR" @@ -16145,16 +16498,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/optimisers.py", + "uri": "scripts/ManifestMake.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 6249, + "charLength": 2495, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# MIT License\n#\n# Copyright (c) 2018 Noah Golmant\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Custom :mod:`torch` optimisers.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Noah Golmant\"\n__license__ = \"MIT License\"\n__copyright__ = \"Copyright (c) 2018 Noah Golmant\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Callable, Dict, Iterable, Optional, Union\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass LARS(Optimizer):\n r\"\"\"Implements layer-wise adaptive rate scaling for SGD.\n\n Source: https://github.com/noahgolmant/pytorch-lars/blob/master/lars.py\n\n Args:\n params (~typing.Iterable | dict): Iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): base learning rate (\\gamma_0)\n momentum (float, optional): momentum factor (default: 0.9) (\"m\")\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0.0005)\n (\"\\beta\")\n eta (float, optional): LARS coefficient\n max_epoch (int): maximum training epoch to determine polynomial LR decay.\n\n Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.\n Large Batch Training of Convolutional Networks: https://arxiv.org/abs/1708.03888\n\n Example:\n >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n \"\"\"\n\n def __init__(\n self,\n params: Union[Iterable[Any], Dict[Any, Any]],\n lr: float,\n momentum: float = 0.9,\n weight_decay: float = 0.0005,\n eta: float = 0.001,\n max_epoch: int = 200,\n ):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n if eta < 0.0:\n raise ValueError(\"Invalid LARS coefficient value: {}\".format(eta))\n\n self.epoch = 0\n defaults = dict(\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n eta=eta,\n max_epoch=max_epoch,\n )\n super(LARS, self).__init__(params, defaults)\n\n def step( # type: ignore[override]\n self, epoch: Optional[int] = None, closure: Optional[Callable[..., Any]] = None\n ):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n epoch (int, optional): Current epoch to calculate polynomial LR decay schedule.\n if None, uses self.epoch and increments it.\n \"\"\"\n loss = None\n if closure is not None: # pragma: no cover\n loss = closure()\n\n if epoch is None:\n epoch = self.epoch\n self.epoch += 1\n\n 
for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n eta = group[\"eta\"]\n lr = group[\"lr\"]\n max_epoch = group[\"max_epoch\"]\n\n for p in group[\"params\"]:\n if p.grad is None: # pragma: no cover\n continue\n\n param_state = self.state[p]\n d_p = p.grad.data\n\n weight_norm = torch.linalg.norm(p.data)\n grad_norm = torch.linalg.norm(d_p)\n\n # Global LR computed on polynomial decay schedule\n decay = (1 - float(epoch) / max_epoch) ** 2\n global_lr = lr * decay\n\n # Compute local learning rate for this layer\n local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm)\n\n # Update the momentum term\n actual_lr = local_lr * global_lr\n\n if \"momentum_buffer\" not in param_state:\n buf = param_state[\"momentum_buffer\"] = torch.zeros_like(p.data) # type: ignore[attr-defined]\n else:\n buf = param_state[\"momentum_buffer\"]\n buf.mul_(momentum).add_(d_p + weight_decay * p.data, alpha=actual_lr)\n p.data.add_(-buf)\n\n return loss\n" + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to create manifests of data for use in Minerva pre-processing to reduce computation time.\"\"\"\n# TODO: Re-engineer for use with torchvision style datasets.\n# TODO: Consider use of parquet format rather than csv.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom minerva.datasets import make_manifest\nfrom minerva.utils import CONFIG, universal_path, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main():\n manifest = make_manifest(CONFIG)\n\n print(manifest)\n\n output_dir = universal_path(CONFIG[\"dir\"][\"cache\"])\n\n fn = output_dir / f\"{utils.get_dataset_name()}_Manifest.csv\"\n\n print(f\"MANIFEST TO FILE -----> {fn}\")\n manifest.to_csv(fn)\n\n\nif __name__ == \"__main__\":\n main()\n" }, "sourceLanguage": "Python" }, @@ -16162,9 +16515,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 6249, + "charLength": 2495, 
"snippet": { - "text": "# -*- coding: utf-8 -*-\n# MIT License\n#\n# Copyright (c) 2018 Noah Golmant\n#\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n#\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n#\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n\"\"\"Custom :mod:`torch` optimisers.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Noah Golmant\"\n__license__ = \"MIT License\"\n__copyright__ = \"Copyright (c) 2018 Noah Golmant\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Callable, Dict, Iterable, Optional, Union\n\nimport torch\nfrom torch.optim.optimizer import Optimizer\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass LARS(Optimizer):\n r\"\"\"Implements layer-wise adaptive rate scaling for SGD.\n\n Source: https://github.com/noahgolmant/pytorch-lars/blob/master/lars.py\n\n Args:\n params (~typing.Iterable | dict): Iterable of parameters to optimize or dicts defining\n parameter groups\n lr (float): base learning rate (\\gamma_0)\n momentum (float, optional): momentum factor (default: 0) (\"m\")\n weight_decay (float, optional): weight decay (L2 penalty) (default: 0)\n (\"\\beta\")\n eta (float, optional): LARS coefficient\n max_epoch (int): maximum training epoch to determine polynomial LR decay.\n\n Based on Algorithm 1 of the following paper by You, Gitman, and Ginsburg.\n Large Batch Training of Convolutional Networks: https://arxiv.org/abs/1708.03888\n\n Example:\n >>> optimizer = LARS(model.parameters(), lr=0.1, eta=1e-3)\n >>> optimizer.zero_grad()\n >>> loss_fn(model(input), target).backward()\n >>> optimizer.step()\n \"\"\"\n\n def __init__(\n self,\n params: Union[Iterable[Any], Dict[Any, Any]],\n lr: float,\n momentum: float = 0.9,\n weight_decay: float = 0.0005,\n eta: float = 0.001,\n max_epoch: int = 200,\n ):\n if lr < 0.0:\n raise ValueError(\"Invalid learning rate: {}\".format(lr))\n if momentum < 0.0:\n raise ValueError(\"Invalid momentum value: {}\".format(momentum))\n if weight_decay < 
0.0:\n raise ValueError(\"Invalid weight_decay value: {}\".format(weight_decay))\n if eta < 0.0:\n raise ValueError(\"Invalid LARS coefficient value: {}\".format(eta))\n\n self.epoch = 0\n defaults = dict(\n lr=lr,\n momentum=momentum,\n weight_decay=weight_decay,\n eta=eta,\n max_epoch=max_epoch,\n )\n super(LARS, self).__init__(params, defaults)\n\n def step( # type: ignore[override]\n self, epoch: Optional[int] = None, closure: Optional[Callable[..., Any]] = None\n ):\n \"\"\"Performs a single optimization step.\n\n Arguments:\n closure (callable, optional): A closure that reevaluates the model\n and returns the loss.\n epoch (int, optioanl): Current epoch to calculate polynomial LR decay schedule.\n if None, uses self.epoch and increments it.\n \"\"\"\n loss = None\n if closure is not None: # pragma: no cover\n loss = closure()\n\n if epoch is None:\n epoch = self.epoch\n self.epoch += 1\n\n for group in self.param_groups:\n weight_decay = group[\"weight_decay\"]\n momentum = group[\"momentum\"]\n eta = group[\"eta\"]\n lr = group[\"lr\"]\n max_epoch = group[\"max_epoch\"]\n\n for p in group[\"params\"]:\n if p.grad is None: # pragma: no cover\n continue\n\n param_state = self.state[p]\n d_p = p.grad.data\n\n weight_norm = torch.linalg.norm(p.data)\n grad_norm = torch.linalg.norm(d_p)\n\n # Global LR computed on polynomial decay schedule\n decay = (1 - float(epoch) / max_epoch) ** 2\n global_lr = lr * decay\n\n # Compute local learning rate for this layer\n local_lr = eta * weight_norm / (grad_norm + weight_decay * weight_norm)\n\n # Update the momentum term\n actual_lr = local_lr * global_lr\n\n if \"momentum_buffer\" not in param_state:\n buf = param_state[\"momentum_buffer\"] = torch.zeros_like(p.data) # type: ignore[attr-defined]\n else:\n buf = param_state[\"momentum_buffer\"]\n buf.mul_(momentum).add_(d_p + weight_decay * p.data, alpha=actual_lr)\n p.data.add_(-buf)\n\n return loss\n" + "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to create manifests of data for use in Minerva pre-processing to reduce computation time.\"\"\"\n# TODO: Re-engineer for use with torchvision style datasets.\n# TODO: Consider use of parquet format rather than csv.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom minerva.datasets import make_manifest\nfrom minerva.utils import CONFIG, universal_path, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main():\n manifest = make_manifest(CONFIG)\n\n print(manifest)\n\n output_dir = universal_path(CONFIG[\"dir\"][\"cache\"])\n\n fn = output_dir / f\"{utils.get_dataset_name()}_Manifest.csv\"\n\n print(f\"MANIFEST TO FILE -----> {fn}\")\n manifest.to_csv(fn)\n\n\nif __name__ == \"__main__\":\n main()\n" } } }, @@ -16177,7 +16530,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "3693ba3b9ce8e84975528bf13a0c3cf93fc575823b2cf7b7c18ab1cc4daa8aee" + "equalIndicator/v1": "53d480f49fbfd42f01b2c5a1180231f5ddbb2ee55ab69d0be794f7612491028b" }, "properties": { "ideaSeverity": "ERROR" @@ -16195,26 +16548,26 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/utils/__init__.py", + "uri": "notebooks/Torchgeo_FCN_Full.ipynb", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 3562, + "charLength": 5948, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Utility functionality, visualisation and configuration for :mod:`minerva`.\n\nAttributes:\n CONFIG_NAME (str): Name of the config to be used in the experiment.\n CONFIG_PATH (str): Path to the config.\n MASTER_PARSER (~argparse.ArgumentParser): Argparser for the CLI for the config loading.\n CONFIG (dict[str, Any]): The master config loaded by :mod:`config_load`.\n AUX_CONFIGS (dict[str, Any]): Dictionary containing the auxilary configs loaded by :mod:`config_load`.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"universal_path\",\n \"CONFIG_NAME\",\n \"CONFIG_PATH\",\n \"MASTER_PARSER\",\n \"CONFIG\",\n \"AUX_CONFIGS\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom minerva.utils.config_load import check_paths, load_configs\nfrom minerva.utils.config_load import universal_path as universal_path # noqa: F401\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Objects to hold the config name and path.\nCONFIG_NAME: Optional[str]\nCONFIG_PATH: Optional[Path]\n\nMASTER_PARSER = argparse.ArgumentParser(add_help=False)\nMASTER_PARSER.add_argument(\n \"-c\",\n \"--config\",\n type=str,\n help=\"Path to the config file defining experiment\",\n)\nMASTER_PARSER.add_argument(\n \"--use-default-conf-dir\",\n dest=\"use_default_conf_dir\",\n action=\"store_true\",\n help=\"Set config path to default\",\n)\n_args, _ = MASTER_PARSER.parse_known_args()\n\n# Store the current working directory (i.e where script is being run from).\n_cwd = os.getcwd()\n\n_path, CONFIG_NAME, CONFIG_PATH = check_paths(_args.config, _args.use_default_conf_dir)\n\n# Loads the configs from file using paths found in sys.args.\nCONFIG, AUX_CONFIGS = load_configs(_path)\n\n# Change the working directory back to script location.\nos.chdir(_cwd)\n" + "text": "{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"import tempfile\\n\",\n \"from pathlib import Path\\n\",\n \"\\n\",\n \"from torch.utils.data import DataLoader\\n\",\n \"from torchvision.models.segmentation import fcn_resnet50\\n\",\n \"import torch.nn as nn\\n\",\n \"from torchgeo.datasets import ChesapeakeCVPR, stack_samples\\n\",\n \"from torchgeo.samplers import RandomGeoSampler\\n\",\n \"from torch.nn import CrossEntropyLoss\\n\",\n \"from torch.optim import Adam\\n\",\n \"import torch\\n\",\n \"import numpy as np\\n\",\n \"import matplotlib.pyplot as plt\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from 
minerva.models import FCN8ResNet18\\n\",\n \"from minerva.utils.utils import get_cuda_device\\n\",\n \"\\n\",\n \"device = get_cuda_device(0)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"data_root = Path(tempfile.gettempdir())\\n\",\n \"train_root = data_root / \\\"train\\\"\\n\",\n \"test_root = data_root / \\\"test\\\"\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"train_dataset = ChesapeakeCVPR(str(train_root), splits=[\\\"de-train\\\", \\\"ny-train\\\", \\\"wv-train\\\"], layers=[\\\"naip-new\\\", \\\"naip-old\\\", \\\"lc\\\"], download=True)\\n\",\n \"test_dataset = ChesapeakeCVPR(str(test_root), splits=[\\\"md-test\\\", \\\"va-test\\\"], layers=[\\\"naip-new\\\", \\\"naip-old\\\", \\\"lc\\\"], download=True)\\n\",\n \"\\n\",\n \"sampler = RandomGeoSampler(train_dataset, size=256, length=200)\\n\",\n \"dataloader = DataLoader(train_dataset, sampler=sampler, collate_fn=stack_samples, batch_size=32)\\n\",\n \"\\n\",\n \"testsampler = RandomGeoSampler(test_dataset, size=256, length=8)\\n\",\n \"testdataloader = DataLoader(test_dataset, sampler=testsampler, collate_fn=stack_samples, batch_size=8, num_workers=4)\\n\",\n \"testdata = list(testdataloader)[0]\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"crit = CrossEntropyLoss()\\n\",\n \"\\n\",\n \"# Criterions are normally parsed to models at init in minerva.\\n\",\n \"fcn = FCN8ResNet18(crit, input_size=(4, 256, 256), n_classes=13).to(device)\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"# Optimisers need to be set to a model in minerva before training.\\n\",\n \"fcn.set_optimiser(opt)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" \\n\",\n \" # Uses MinervaModel.step.\\n\",\n \" loss, pred = fcn.step(image, target, train=True)\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"fcn = fcn_resnet50(num_classes=13).to(device)\\n\",\n \"fcn.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(device)\\n\",\n \"\\n\",\n \"crit = CrossEntropyLoss()\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = 
sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \"\\n\",\n \" opt.zero_grad()\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \" loss = crit(pred, target)\\n\",\n \" loss.backward()\\n\",\n \" opt.step()\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"minerva-310\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.10.9\"\n },\n \"orig_nbformat\": 4,\n \"vscode\": {\n \"interpreter\": {\n \"hash\": \"3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff\"\n }\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n" }, - "sourceLanguage": "Python" + "sourceLanguage": "JupyterPython" }, "contextRegion": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 3562, + "charLength": 5948, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Utility functionality, visualisation and configuration for :mod:`minerva`.\n\nAttributes:\n CONFIG_NAME (str): Name of the config to be used in the experiment.\n CONFIG_PATH (str): Path to the config.\n MASTER_PARSER (~argparse.ArgumentParser): Argparser for the CLI for the config loading.\n CONFIG (dict[str, Any]): The master config loaded by :mod:`config_load`.\n AUX_CONFIGS (dict[str, Any]): Dictionary containing the auxilary configs loaded by :mod:`config_load`.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"universal_path\",\n \"CONFIG_NAME\",\n \"CONFIG_PATH\",\n \"MASTER_PARSER\",\n \"CONFIG\",\n \"AUX_CONFIGS\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n\nfrom minerva.utils.config_load import check_paths, load_configs\nfrom minerva.utils.config_load import universal_path as universal_path # noqa: F401\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Objects to hold the config name and path.\nCONFIG_NAME: Optional[str]\nCONFIG_PATH: Optional[Path]\n\nMASTER_PARSER = argparse.ArgumentParser(add_help=False)\nMASTER_PARSER.add_argument(\n \"-c\",\n \"--config\",\n type=str,\n help=\"Path to the config file defining experiment\",\n)\nMASTER_PARSER.add_argument(\n \"--use-default-conf-dir\",\n dest=\"use_default_conf_dir\",\n action=\"store_true\",\n help=\"Set config path to default\",\n)\n_args, _ = MASTER_PARSER.parse_known_args()\n\n# Store the current working directory (i.e where script is being run from).\n_cwd = os.getcwd()\n\n_path, CONFIG_NAME, CONFIG_PATH = check_paths(_args.config, _args.use_default_conf_dir)\n\n# Loads the configs from file using paths found in sys.args.\nCONFIG, AUX_CONFIGS = load_configs(_path)\n\n# Change the working directory back to script location.\nos.chdir(_cwd)\n" + "text": "{\n \"cells\": [\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"import tempfile\\n\",\n \"from pathlib import Path\\n\",\n \"\\n\",\n \"from torch.utils.data import DataLoader\\n\",\n \"from torchvision.models.segmentation import fcn_resnet50\\n\",\n \"import torch.nn as nn\\n\",\n \"from torchgeo.datasets import ChesapeakeCVPR, stack_samples\\n\",\n \"from torchgeo.samplers import RandomGeoSampler\\n\",\n \"from torch.nn import CrossEntropyLoss\\n\",\n \"from torch.optim import Adam\\n\",\n \"import torch\\n\",\n \"import numpy as np\\n\",\n \"import matplotlib.pyplot as plt\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"from 
minerva.models import FCN8ResNet18\\n\",\n \"from minerva.utils.utils import get_cuda_device\\n\",\n \"\\n\",\n \"device = get_cuda_device(0)\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"data_root = Path(tempfile.gettempdir())\\n\",\n \"train_root = data_root / \\\"train\\\"\\n\",\n \"test_root = data_root / \\\"test\\\"\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"train_dataset = ChesapeakeCVPR(str(train_root), splits=[\\\"de-train\\\", \\\"ny-train\\\", \\\"wv-train\\\"], layers=[\\\"naip-new\\\", \\\"naip-old\\\", \\\"lc\\\"], download=True)\\n\",\n \"test_dataset = ChesapeakeCVPR(str(test_root), splits=[\\\"md-test\\\", \\\"va-test\\\"], layers=[\\\"naip-new\\\", \\\"naip-old\\\", \\\"lc\\\"], download=True)\\n\",\n \"\\n\",\n \"sampler = RandomGeoSampler(train_dataset, size=256, length=200)\\n\",\n \"dataloader = DataLoader(train_dataset, sampler=sampler, collate_fn=stack_samples, batch_size=32)\\n\",\n \"\\n\",\n \"testsampler = RandomGeoSampler(test_dataset, size=256, length=8)\\n\",\n \"testdataloader = DataLoader(test_dataset, sampler=testsampler, collate_fn=stack_samples, batch_size=8, num_workers=4)\\n\",\n \"testdata = list(testdataloader)[0]\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"crit = CrossEntropyLoss()\\n\",\n \"\\n\",\n \"# Criterions are normally parsed to models at init in minerva.\\n\",\n \"fcn = FCN8ResNet18(crit, input_size=(4, 256, 256), n_classes=13).to(device)\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"# Optimisers need to be set to a model in minerva before training.\\n\",\n \"fcn.set_optimiser(opt)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" \\n\",\n \" # Uses MinervaModel.step.\\n\",\n \" loss, pred = fcn.step(image, target, train=True)\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"fcn = fcn_resnet50(num_classes=13).to(device)\\n\",\n \"fcn.backbone.conv1 = nn.Conv2d(4, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False).to(device)\\n\",\n \"\\n\",\n \"crit = CrossEntropyLoss()\\n\",\n \"opt = Adam(fcn.parameters(), lr=1e-3)\\n\",\n \"\\n\",\n \"for epoch in range(101):\\n\",\n \" losses = []\\n\",\n \" for i, sample in enumerate(dataloader):\\n\",\n \" image = 
sample[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = sample[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \"\\n\",\n \" opt.zero_grad()\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \" loss = crit(pred, target)\\n\",\n \" loss.backward()\\n\",\n \" opt.step()\\n\",\n \" losses.append(loss.item())\\n\",\n \"\\n\",\n \" print(epoch, np.mean(losses))\\n\",\n \" if epoch % 10 == 0:\\n\",\n \" with torch.no_grad():\\n\",\n \" image = testdata[\\\"image\\\"].to(device).float() / 255.0\\n\",\n \" target = testdata[\\\"mask\\\"].to(device).long().squeeze(1)\\n\",\n \" pred = fcn(image)[\\\"out\\\"]\\n\",\n \"\\n\",\n \" fig, axs = plt.subplots(3, pred.shape[0], figsize=(10,4))\\n\",\n \" for i in range(pred.shape[0]):\\n\",\n \" axs[0,i].imshow(image[i].cpu().numpy()[:3].transpose(1,2,0))\\n\",\n \" axs[1,i].imshow(target[i].cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" axs[2,i].imshow(pred[i].detach().argmax(dim=0).cpu().numpy(), cmap=\\\"Set3\\\", vmin=0, vmax=12)\\n\",\n \" plt.setp(plt.gcf().get_axes(), xticks=[], yticks=[])\\n\",\n \" plt.show()\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"minerva-310\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.10.9\"\n },\n \"orig_nbformat\": 4,\n \"vscode\": {\n \"interpreter\": {\n \"hash\": \"3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff\"\n }\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n" } } }, @@ -16227,7 +16580,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "e8fcc77d01bf4518e2a9e0e8ae149c967087d3511f8688e95006d0690b67b4f9" + "equalIndicator/v1": "ca08e1b130aeaae2d6b809d0273b67dabb058d5992671cb83d3197bca8728dfe" }, "properties": { "ideaSeverity": "ERROR" @@ -16245,16 +16598,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/modelio.py", + "uri": "minerva/samplers.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 9047, + "charLength": 11702, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle various IO from `dataloaders` and to models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"sup_tg\",\n \"ssl_pair_tg\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Dict, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import LongTensor, Tensor\nfrom torchgeo.datasets.utils import BoundingBox\n\nfrom minerva.models import MinervaModel\nfrom minerva.utils.utils import mask_to_ohe\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef sup_tg(\n batch: Dict[Any, Any],\n model: MinervaModel,\n device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], Tensor, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a supervised model using :mod:`torchgeo` datasets.\n\n Args:\n batch (dict[~typing.Any, ~typing.Any]): Batch of data in a :class:`dict`.\n Must have ``\"image\"``, ``\"mask\"`` and ``\"bbox\"`` keys.\n model (MinervaModel): Model being fitted.\n device (~torch.device): `torch` device object to send data to (e.g. 
CUDA device).\n mode (str): Mode of model fitting to use.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~typing.Sequence[~torchgeo.datasets.utils.BoundingBox]]:\n The ``loss``, the model output ``z``, the ground truth ``y`` supplied and the bounding boxes\n of the input images supplied.\n \"\"\"\n # Extracts the x and y batches from the dict.\n images: Tensor = batch[\"image\"]\n masks: Tensor = batch[\"mask\"]\n\n # Re-arranges the x and y batches.\n x_batch: Tensor = images.to(torch.float) # type: ignore[attr-defined]\n y_batch: Tensor\n\n # Squeeze out axis 1 if only 1 element wide.\n if masks.shape[1] == 1:\n masks = np.squeeze(masks.detach().cpu().numpy(), axis=1)\n\n if isinstance(masks, Tensor):\n masks = masks.detach().cpu().numpy()\n y_batch = torch.tensor(masks, dtype=torch.long) # type: ignore[attr-defined]\n\n # Transfer to GPU.\n x: Tensor = x_batch.to(device)\n y: Tensor = y_batch.to(device)\n\n # Runs a training epoch.\n if mode == \"train\":\n loss, z = model.step(x, y, train=True)\n\n # Runs a validation or test epoch.\n else:\n loss, z = model.step(x, y, train=False)\n\n bbox: Sequence[BoundingBox] = batch[\"bbox\"]\n assert isinstance(bbox, Sequence)\n return loss, z, y, bbox\n\n\ndef autoencoder_io(\n batch: Dict[Any, Any],\n model: MinervaModel,\n device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], Tensor, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for an autoencoder using :mod:`torchgeo` datasets by only using the same data\n for input and ground truth.\n\n Args:\n batch (dict[~typing.Any, ~typing.Any]): Batch of data in a :class:`dict`.\n Must have ``\"image\"``, ``\"mask\"`` and ``\"bbox\"`` keys.\n model (MinervaModel): Model being fitted.\n device (~torch.device): `torch` device object to send data to (e.g. CUDA device).\n mode (str): Mode of model fitting to use.\n\n Keyword args:\n autoencoder_data_key (str): Key of the data type in the sample dict to use for both input and ground truth.\n Must be either ``\"mask\"`` or ``\"image\"``.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~typing.Sequence[~torchgeo.datasets.utils.BoundingBox]]:\n The ``loss``, the model output ``z``, the ground truth ``y`` supplied and the bounding boxes\n of the input images supplied.\n\n Raises:\n ValueError: If the value given for ``key`` is not ``\"mask\"`` or ``\"image\"``.\n\n .. 
versionadded:: 0.23\n \"\"\"\n x: Tensor\n y: Tensor\n key = kwargs.get(\"autoencoder_data_key\")\n\n # Extracts the images and masks from the batch sample dict.\n images: Tensor = batch[\"image\"]\n masks: LongTensor = batch[\"mask\"]\n\n if key == \"mask\":\n # Squeeze out axis 1 if only 1 element wide.\n if masks.shape[1] == 1:\n _masks = torch.tensor(\n np.squeeze(masks.detach().cpu().numpy(), axis=1), dtype=torch.long\n )\n assert isinstance(_masks, LongTensor)\n masks = _masks\n\n input_masks: Tensor = torch.stack(\n tuple([mask_to_ohe(mask, kwargs.get(\"n_classes\", None)) for mask in masks])\n )\n output_masks: LongTensor = masks\n\n if isinstance(input_masks, Tensor):\n input_masks = input_masks.detach().cpu().numpy()\n\n if isinstance(output_masks, Tensor):\n output_masks = output_masks.detach().cpu().numpy()\n\n # Transfer to GPU and cast to correct dtypes.\n x = torch.tensor(input_masks, dtype=torch.float, device=device)\n y = torch.tensor(output_masks, dtype=torch.long, device=device)\n\n elif key == \"image\":\n # Extract the images from the batch, set to float, transfer to GPU and make x and y.\n x = images.to(dtype=torch.float, device=device)\n y = images.to(dtype=torch.float, device=device)\n\n else:\n raise ValueError(\n f\"The value of {key=} is not understood. Must be either 'mask' or 'image'\"\n )\n\n # Runs a training epoch.\n if mode == \"train\":\n loss, z = model.step(x, y, train=True)\n\n # Runs a validation or test epoch.\n else:\n loss, z = model.step(x, y, train=False)\n\n bbox: Sequence[BoundingBox] = batch[\"bbox\"]\n assert isinstance(bbox, Sequence)\n return loss, z, y, bbox\n\n\ndef ssl_pair_tg(\n batch: Tuple[Dict[str, Any], Dict[str, Any]],\n model: MinervaModel,\n device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], None, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a self-supervised Siamese model using :mod:`torchgeo` datasets.\n\n Args:\n batch (tuple[dict[str, ~typing.Any], dict[str, ~typing.Any]]): Pair of batches of data in :class:`dict` (s).\n Must have ``\"image\"`` and ``\"bbox\"`` keys.\n model (MinervaModel): Model being fitted.\n device (~torch.device): :mod:`torch` device object to send data to (e.g. 
``CUDA`` device).\n mode (str): Mode of model fitting to use.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~typing.Sequence[~torchgeo.datasets.utils.BoundingBox]]: The\n ``loss``, the model output ``z``, the ``y`` supplied and the bounding boxes\n of the original input images supplied.\n \"\"\"\n # Extracts the x_i batch from the dict.\n x_i_batch: Tensor = batch[0][\"image\"]\n x_j_batch: Tensor = batch[1][\"image\"]\n\n # Ensures images are floats.\n x_i_batch = x_i_batch.to(torch.float) # type: ignore[attr-defined]\n x_j_batch = x_j_batch.to(torch.float) # type: ignore[attr-defined]\n\n # Stacks each side of the pair batches together.\n x_batch = torch.stack([x_i_batch, x_j_batch])\n\n # Transfer to GPU.\n x = x_batch.to(device, non_blocking=True)\n\n # Runs a training epoch.\n if mode == \"train\":\n loss, z = model.step(x, train=True)\n\n # Runs a validation epoch.\n else:\n loss, z = model.step(x, train=False)\n\n return loss, z, None, batch[0][\"bbox\"] + batch[1][\"bbox\"]\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
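The `sup_tg` helper quoted in the old side of this hunk normalises a torchgeo batch before it reaches `model.step`: images are cast to float, and single-channel masks are squeezed down to class-index tensors. A minimal sketch of that mask handling, using only operations visible in the snippet (the shapes are invented for illustration):

```python
import numpy as np
import torch

# A batch of 4 single-channel masks, as a torchgeo dataloader would yield them.
masks = torch.randint(0, 8, (4, 1, 32, 32))

# Squeeze out axis 1 if it is only 1 element wide, as sup_tg does...
if masks.shape[1] == 1:
    masks = np.squeeze(masks.detach().cpu().numpy(), axis=1)

# ...then rebuild a long tensor to serve as the ground truth y_batch.
y_batch = torch.tensor(masks, dtype=torch.long)
print(y_batch.shape)  # torch.Size([4, 32, 32])
```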
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module containing custom samplers for :mod:`torchgeo` datasets.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"RandomPairGeoSampler\",\n \"RandomPairBatchGeoSampler\",\n \"get_greater_bbox\",\n \"get_pair_bboxes\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport random\nfrom typing import Iterator, List, Optional, Sequence, Tuple, Union\n\nfrom torchgeo.datasets import GeoDataset\nfrom torchgeo.datasets.utils import BoundingBox\nfrom torchgeo.samplers import BatchGeoSampler, GeoSampler\nfrom torchgeo.samplers.utils import _to_tuple, get_random_bounding_box\n\nfrom minerva.utils import utils\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass RandomPairGeoSampler(GeoSampler):\n \"\"\"Samples geo-close pairs of elements from a region of interest randomly.\n\n An extension to :class:`~torchgeo.samplers.RandomGeoSampler` that supports paired sampling (i.e for GeoCLR).\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second :class:`float` for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n length (int): number of random samples to draw per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``).\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.length = length\n self.max_r = max_r\n self.hits = []\n for hit in self.index.intersection(tuple(self.roi), objects=True):\n bounds = BoundingBox(*hit.bounds) # type: ignore\n if (\n bounds.maxx - bounds.minx > self.size[1]\n and bounds.maxy - bounds.miny > self.size[0]\n ):\n self.hits.append(hit)\n\n def __iter__(self) -> Iterator[Tuple[BoundingBox, BoundingBox]]: # type: ignore[override]\n \"\"\"Return a pair of :class:`~torchgeo.datasets.utils.BoundingBox` indices of a dataset\n that are geospatially close.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Tuple of\n bounding boxes to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n # Choose a random tile.\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds)\n\n bbox_a, bbox_b = get_pair_bboxes(bounds, self.size, self.res, self.max_r)\n\n yield bbox_a, bbox_b\n\n def __len__(self) -> int:\n \"\"\"Return the number of samples in a single epoch.\n\n Returns:\n int: Length of the epoch.\n \"\"\"\n return self.length\n\n\nclass RandomPairBatchGeoSampler(BatchGeoSampler):\n \"\"\"Samples batches of pairs of elements from a region of interest randomly.\n\n This is particularly useful during training when you want to maximize the size of\n the dataset and return as many random :term:`patches` as possible.\n\n An extension to :class:`~torchgeo.samplers.RandomBatchGeoSampler` that supports\n paired sampling (i.e. for GeoCLR) and ability to samples from multiple tiles per batch\n to increase variance of batch.\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second *float* for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n batch_size (int): Number of samples per batch.\n length (int): Number of samples per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``)\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n tiles_per_batch (int): Optional; Number of tiles to sample from per batch.\n Must be a multiple of ``batch_size``.\n\n Raises:\n ValueError: If ``tiles_per_batch`` is not a multiple of ``batch_size``.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n batch_size: int,\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n tiles_per_batch: int = 4,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.batch_size = batch_size\n self.length = length\n self.max_r = max_r\n self.hits = list(self.index.intersection(tuple(self.roi), objects=True))\n\n self.tiles_per_batch = tiles_per_batch\n\n if self.batch_size % tiles_per_batch == 0:\n self.sam_per_tile = self.batch_size // tiles_per_batch\n else:\n raise ValueError(f\"{tiles_per_batch=} is not a multiple of {batch_size=}\")\n\n def __iter__(self) -> Iterator[List[Tuple[BoundingBox, BoundingBox]]]: # type: ignore[override]\n \"\"\"Return the indices of a dataset.\n\n Returns:\n Batch of paired :class:`~torchgeo.datasets.utils.BoundingBox` to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n batch = []\n for _ in range(self.tiles_per_batch):\n # Choose a random tile\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds) # type: ignore\n\n # Choose random indices within that tile\n for _ in range(self.sam_per_tile):\n bbox_a, bbox_b = get_pair_bboxes(\n bounds, self.size, self.res, self.max_r\n )\n batch.append((bbox_a, bbox_b))\n\n yield batch\n\n def __len__(self) -> int:\n \"\"\"Return the number of batches in a single epoch.\n\n Returns:\n int: Number of batches in an epoch\n \"\"\"\n return self.length // self.batch_size\n\n\ndef get_greater_bbox(\n bbox: BoundingBox, r: float, size: Union[float, int, Sequence[float]]\n) -> BoundingBox:\n \"\"\"Return a bounding box at ``r`` distance around the first box.\n\n Args:\n bbox (~torchgeo.datasets.utils.BoundingBox): Bounding box of the original sample.\n r (float): Distance in pixels to extend the original bounding box by\n to get a new greater bounds to sample from.\n size (float | ~typing.Sequence[float]): The (``x``, ``y``) size of the :term:`patch` that ``bbox``\n represents in pixels. 
Will only use size[0] if a :class:`~typing.Sequence`.\n\n Returns:\n ~torchgeo.datasets.utils.BoundingBox: Greater bounds around original bounding box to sample from.\n \"\"\"\n x: float\n if isinstance(size, Sequence):\n assert isinstance(size, Sequence)\n x = float(size[0])\n else:\n assert isinstance(size, (float, int))\n x = float(size)\n\n # Calculates the geospatial distance to add to the existing bounding box to get\n # the box to sample the other side of the pair from.\n r_in_crs = r * abs(bbox.maxx - bbox.minx) / float(x)\n\n return BoundingBox(\n bbox.minx - r_in_crs,\n bbox.maxx + r_in_crs,\n bbox.miny - r_in_crs,\n bbox.maxy + r_in_crs,\n bbox.mint,\n bbox.maxt,\n )\n\n\ndef get_pair_bboxes(\n bounds: BoundingBox,\n size: Union[Tuple[float, float], float],\n res: float,\n max_r: float,\n) -> Tuple[BoundingBox, BoundingBox]:\n \"\"\"Samples a pair of bounding boxes geo-spatially close to each other.\n\n Args:\n bounds (~torchgeo.datasets.utils.BoundingBox): Maximum bounds of the :term:`tile` to sample pair from.\n size (tuple[float, float] | float): Size of each :term:`patch`.\n res (float): Resolution to sample :term:`patch` at.\n max_r (float): Padding around original :term:`patch` to sample new :term:`patch` from.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Pair of bounding boxes\n to sample pair of patches from dataset.\n \"\"\"\n # Choose a random index within that tile.\n bbox_a = get_random_bounding_box(bounds, size, res)\n\n max_bounds = get_greater_bbox(bbox_a, max_r, size)\n\n # Check that the new bbox cannot exceed the bounds of the tile.\n max_bounds = utils.check_within_bounds(max_bounds, bounds)\n\n # Randomly sample another box at a max distance of max_r from box_a.\n bbox_b = get_random_bounding_box(max_bounds, size, res)\n\n return bbox_a, bbox_b\n" }, "sourceLanguage": "Python" }, @@ -16262,9 +16615,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 9047, + "charLength": 11702, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
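The replacement snippet in this hunk is the samplers module, whose `RandomPairGeoSampler.__iter__` yields pairs of bounding boxes rather than single ones, so the natural way to consume it is to index the dataset once per box. A hedged sketch (the import path is inferred from the `minerva/samplers.py` artifact URI elsewhere in this report, and `dataset` stands for any pre-built torchgeo `GeoDataset`):

```python
from torchgeo.datasets import GeoDataset

from minerva.samplers import RandomPairGeoSampler


def draw_pairs(dataset: GeoDataset, n: int = 4):
    """Yield ``n`` pairs of geospatially close patches from ``dataset``."""
    sampler = RandomPairGeoSampler(dataset, size=256.0, length=n, max_r=128.0)
    for bbox_a, bbox_b in sampler:
        # Each draw gives two nearby patches, e.g. for GeoCLR-style
        # contrastive pre-training.
        yield dataset[bbox_a], dataset[bbox_b]
```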
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle various IO from `dataloaders` and to models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"sup_tg\",\n \"ssl_pair_tg\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Dict, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch\nfrom torch import LongTensor, Tensor\nfrom torchgeo.datasets.utils import BoundingBox\n\nfrom minerva.models import MinervaModel\nfrom minerva.utils.utils import mask_to_ohe\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef sup_tg(\n batch: Dict[Any, Any],\n model: MinervaModel,\n device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], Tensor, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a supervised model using :mod:`torchgeo` datasets.\n\n Args:\n batch (dict[~typing.Any, ~typing.Any]): Batch of data in a :class:`dict`.\n Must have ``\"image\"``, ``\"mask\"`` and ``\"bbox\"`` keys.\n model (MinervaModel): Model being fitted.\n device (~torch.device): `torch` device object to send data to (e.g. 
CUDA device).\n mode (str): Mode of model fitting to use.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~typing.Sequence[~torchgeo.datasets.utils.BoundingBox]]:\n The ``loss``, the model output ``z``, the ground truth ``y`` supplied and the bounding boxes\n of the input images supplied.\n \"\"\"\n # Extracts the x and y batches from the dict.\n images: Tensor = batch[\"image\"]\n masks: Tensor = batch[\"mask\"]\n\n # Re-arranges the x and y batches.\n x_batch: Tensor = images.to(torch.float) # type: ignore[attr-defined]\n y_batch: Tensor\n\n # Squeeze out axis 1 if only 1 element wide.\n if masks.shape[1] == 1:\n masks = np.squeeze(masks.detach().cpu().numpy(), axis=1)\n\n if isinstance(masks, Tensor):\n masks = masks.detach().cpu().numpy()\n y_batch = torch.tensor(masks, dtype=torch.long) # type: ignore[attr-defined]\n\n # Transfer to GPU.\n x: Tensor = x_batch.to(device)\n y: Tensor = y_batch.to(device)\n\n # Runs a training epoch.\n if mode == \"train\":\n loss, z = model.step(x, y, train=True)\n\n # Runs a validation or test epoch.\n else:\n loss, z = model.step(x, y, train=False)\n\n bbox: Sequence[BoundingBox] = batch[\"bbox\"]\n assert isinstance(bbox, Sequence)\n return loss, z, y, bbox\n\n\ndef autoencoder_io(\n batch: Dict[Any, Any],\n model: MinervaModel,\n device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], Tensor, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for an autoencoder using :mod:`torchgeo` datasets by only using the same data\n for input and ground truth.\n\n Args:\n batch (dict[~typing.Any, ~typing.Any]): Batch of data in a :class:`dict`.\n Must have ``\"image\"``, ``\"mask\"`` and ``\"bbox\"`` keys.\n model (MinervaModel): Model being fitted.\n device (~torch.device): `torch` device object to send data to (e.g. CUDA device).\n mode (str): Mode of model fitting to use.\n\n Keyword args:\n autoencoder_data_key (str): Key of the data type in the sample dict to use for both input and ground truth.\n Must be either ``\"mask\"`` or ``\"image\"``.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~typing.Sequence[~torchgeo.datasets.utils.BoundingBox]]:\n The ``loss``, the model output ``z``, the ground truth ``y`` supplied and the bounding boxes\n of the input images supplied.\n\n Raises:\n ValueError: If the value given for ``key`` is not ``\"mask\"`` or ``\"image\"``.\n\n .. 
versionadded:: 0.23\n \"\"\"\n x: Tensor\n y: Tensor\n key = kwargs.get(\"autoencoder_data_key\")\n\n # Extracts the images and masks from the batch sample dict.\n images: Tensor = batch[\"image\"]\n masks: LongTensor = batch[\"mask\"]\n\n if key == \"mask\":\n # Squeeze out axis 1 if only 1 element wide.\n if masks.shape[1] == 1:\n _masks = torch.tensor(\n np.squeeze(masks.detach().cpu().numpy(), axis=1), dtype=torch.long\n )\n assert isinstance(_masks, LongTensor)\n masks = _masks\n\n input_masks: Tensor = torch.stack(\n tuple([mask_to_ohe(mask, kwargs.get(\"n_classes\", None)) for mask in masks])\n )\n output_masks: LongTensor = masks\n\n if isinstance(input_masks, Tensor):\n input_masks = input_masks.detach().cpu().numpy()\n\n if isinstance(output_masks, Tensor):\n output_masks = output_masks.detach().cpu().numpy()\n\n # Transfer to GPU and cast to correct dtypes.\n x = torch.tensor(input_masks, dtype=torch.float, device=device)\n y = torch.tensor(output_masks, dtype=torch.long, device=device)\n\n elif key == \"image\":\n # Extract the images from the batch, set to float, transfer to GPU and make x and y.\n x = images.to(dtype=torch.float, device=device)\n y = images.to(dtype=torch.float, device=device)\n\n else:\n raise ValueError(\n f\"The value of {key=} is not understood. Must be either 'mask' or 'image'\"\n )\n\n # Runs a training epoch.\n if mode == \"train\":\n loss, z = model.step(x, y, train=True)\n\n # Runs a validation or test epoch.\n else:\n loss, z = model.step(x, y, train=False)\n\n bbox: Sequence[BoundingBox] = batch[\"bbox\"]\n assert isinstance(bbox, Sequence)\n return loss, z, y, bbox\n\n\ndef ssl_pair_tg(\n batch: Tuple[Dict[str, Any], Dict[str, Any]],\n model: MinervaModel,\n device: torch.device, # type: ignore[name-defined]\n mode: str,\n **kwargs,\n) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]], None, Sequence[BoundingBox]]:\n \"\"\"Provides IO functionality for a self-supervised Siamese model using :mod:`torchgeo` datasets.\n\n Args:\n batch (tuple[dict[str, ~typing.Any], dict[str, ~typing.Any]]): Pair of batches of data in :class:`dict` (s).\n Must have ``\"image\"`` and ``\"bbox\"`` keys.\n model (MinervaModel): Model being fitted.\n device (~torch.device): :mod:`torch` device object to send data to (e.g. 
``CUDA`` device).\n mode (str): Mode of model fitting to use.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~typing.Sequence[~torchgeo.datasets.utils.BoundingBox]]: The\n ``loss``, the model output ``z``, the ``y`` supplied and the bounding boxes\n of the original input images supplied.\n \"\"\"\n # Extracts the x_i batch from the dict.\n x_i_batch: Tensor = batch[0][\"image\"]\n x_j_batch: Tensor = batch[1][\"image\"]\n\n # Ensures images are floats.\n x_i_batch = x_i_batch.to(torch.float) # type: ignore[attr-defined]\n x_j_batch = x_j_batch.to(torch.float) # type: ignore[attr-defined]\n\n # Stacks each side of the pair batches together.\n x_batch = torch.stack([x_i_batch, x_j_batch])\n\n # Transfer to GPU.\n x = x_batch.to(device, non_blocking=True)\n\n # Runs a training epoch.\n if mode == \"train\":\n loss, z = model.step(x, train=True)\n\n # Runs a validation epoch.\n else:\n loss, z = model.step(x, train=False)\n\n return loss, z, None, batch[0][\"bbox\"] + batch[1][\"bbox\"]\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
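The old-text side of this hunk repeats `ssl_pair_tg`, whose key move is stacking the two augmented views along a new leading axis so that a Siamese `model.step` sees both in a single call. The shape contract in isolation (sizes invented for illustration):

```python
import torch

x_i = torch.rand(8, 4, 64, 64)  # view 1: batch of 8 four-band patches
x_j = torch.rand(8, 4, 64, 64)  # view 2: their geospatially close counterparts

x = torch.stack([x_i, x_j])     # pair axis first
assert x.shape == (2, 8, 4, 64, 64)
```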
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module containing custom samplers for :mod:`torchgeo` datasets.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"RandomPairGeoSampler\",\n \"RandomPairBatchGeoSampler\",\n \"get_greater_bbox\",\n \"get_pair_bboxes\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport random\nfrom typing import Iterator, List, Optional, Sequence, Tuple, Union\n\nfrom torchgeo.datasets import GeoDataset\nfrom torchgeo.datasets.utils import BoundingBox\nfrom torchgeo.samplers import BatchGeoSampler, GeoSampler\nfrom torchgeo.samplers.utils import _to_tuple, get_random_bounding_box\n\nfrom minerva.utils import utils\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass RandomPairGeoSampler(GeoSampler):\n \"\"\"Samples geo-close pairs of elements from a region of interest randomly.\n\n An extension to :class:`~torchgeo.samplers.RandomGeoSampler` that supports paired sampling (i.e for GeoCLR).\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second :class:`float` for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n length (int): number of random samples to draw per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``).\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.length = length\n self.max_r = max_r\n self.hits = []\n for hit in self.index.intersection(tuple(self.roi), objects=True):\n bounds = BoundingBox(*hit.bounds) # type: ignore\n if (\n bounds.maxx - bounds.minx > self.size[1]\n and bounds.maxy - bounds.miny > self.size[0]\n ):\n self.hits.append(hit)\n\n def __iter__(self) -> Iterator[Tuple[BoundingBox, BoundingBox]]: # type: ignore[override]\n \"\"\"Return a pair of :class:`~torchgeo.datasets.utils.BoundingBox` indices of a dataset\n that are geospatially close.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Tuple of\n bounding boxes to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n # Choose a random tile.\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds)\n\n bbox_a, bbox_b = get_pair_bboxes(bounds, self.size, self.res, self.max_r)\n\n yield bbox_a, bbox_b\n\n def __len__(self) -> int:\n \"\"\"Return the number of samples in a single epoch.\n\n Returns:\n int: Length of the epoch.\n \"\"\"\n return self.length\n\n\nclass RandomPairBatchGeoSampler(BatchGeoSampler):\n \"\"\"Samples batches of pairs of elements from a region of interest randomly.\n\n This is particularly useful during training when you want to maximize the size of\n the dataset and return as many random :term:`patches` as possible.\n\n An extension to :class:`~torchgeo.samplers.RandomBatchGeoSampler` that supports\n paired sampling (i.e. for GeoCLR) and ability to samples from multiple tiles per batch\n to increase variance of batch.\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second *float* for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n batch_size (int): Number of samples per batch.\n length (int): Number of samples per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``)\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n tiles_per_batch (int): Optional; Number of tiles to sample from per batch.\n Must be a multiple of ``batch_size``.\n\n Raises:\n ValueError: If ``tiles_per_batch`` is not a multiple of ``batch_size``.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n batch_size: int,\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n tiles_per_batch: int = 4,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.batch_size = batch_size\n self.length = length\n self.max_r = max_r\n self.hits = list(self.index.intersection(tuple(self.roi), objects=True))\n\n self.tiles_per_batch = tiles_per_batch\n\n if self.batch_size % tiles_per_batch == 0:\n self.sam_per_tile = self.batch_size // tiles_per_batch\n else:\n raise ValueError(f\"{tiles_per_batch=} is not a multiple of {batch_size=}\")\n\n def __iter__(self) -> Iterator[List[Tuple[BoundingBox, BoundingBox]]]: # type: ignore[override]\n \"\"\"Return the indices of a dataset.\n\n Returns:\n Batch of paired :class:`~torchgeo.datasets.utils.BoundingBox` to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n batch = []\n for _ in range(self.tiles_per_batch):\n # Choose a random tile\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds) # type: ignore\n\n # Choose random indices within that tile\n for _ in range(self.sam_per_tile):\n bbox_a, bbox_b = get_pair_bboxes(\n bounds, self.size, self.res, self.max_r\n )\n batch.append((bbox_a, bbox_b))\n\n yield batch\n\n def __len__(self) -> int:\n \"\"\"Return the number of batches in a single epoch.\n\n Returns:\n int: Number of batches in an epoch\n \"\"\"\n return self.length // self.batch_size\n\n\ndef get_greater_bbox(\n bbox: BoundingBox, r: float, size: Union[float, int, Sequence[float]]\n) -> BoundingBox:\n \"\"\"Return a bounding box at ``r`` distance around the first box.\n\n Args:\n bbox (~torchgeo.datasets.utils.BoundingBox): Bounding box of the original sample.\n r (float): Distance in pixels to extend the original bounding box by\n to get a new greater bounds to sample from.\n size (float | ~typing.Sequence[float]): The (``x``, ``y``) size of the :term:`patch` that ``bbox``\n represents in pixels. 
Will only use size[0] if a :class:`~typing.Sequence`.\n\n Returns:\n ~torchgeo.datasets.utils.BoundingBox: Greater bounds around original bounding box to sample from.\n \"\"\"\n x: float\n if isinstance(size, Sequence):\n assert isinstance(size, Sequence)\n x = float(size[0])\n else:\n assert isinstance(size, (float, int))\n x = float(size)\n\n # Calculates the geospatial distance to add to the existing bounding box to get\n # the box to sample the other side of the pair from.\n r_in_crs = r * abs(bbox.maxx - bbox.minx) / float(x)\n\n return BoundingBox(\n bbox.minx - r_in_crs,\n bbox.maxx + r_in_crs,\n bbox.miny - r_in_crs,\n bbox.maxy + r_in_crs,\n bbox.mint,\n bbox.maxt,\n )\n\n\ndef get_pair_bboxes(\n bounds: BoundingBox,\n size: Union[Tuple[float, float], float],\n res: float,\n max_r: float,\n) -> Tuple[BoundingBox, BoundingBox]:\n \"\"\"Samples a pair of bounding boxes geo-spatially close to each other.\n\n Args:\n bounds (~torchgeo.datasets.utils.BoundingBox): Maximum bounds of the :term:`tile` to sample pair from.\n size (tuple[float, float] | float): Size of each :term:`patch`.\n res (float): Resolution to sample :term:`patch` at.\n max_r (float): Padding around original :term:`patch` to sample new :term:`patch` from.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Pair of bounding boxes\n to sample pair of patches from dataset.\n \"\"\"\n # Choose a random index within that tile.\n bbox_a = get_random_bounding_box(bounds, size, res)\n\n max_bounds = get_greater_bbox(bbox_a, max_r, size)\n\n # Check that the new bbox cannot exceed the bounds of the tile.\n max_bounds = utils.check_within_bounds(max_bounds, bounds)\n\n # Randomly sample another box at a max distance of max_r from box_a.\n bbox_b = get_random_bounding_box(max_bounds, size, res)\n\n return bbox_a, bbox_b\n" } } }, @@ -16277,7 +16630,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "0d03501d2ace4ce6750a50267bbf4bf0c7fb3b1b123583950c993d1f190e33b3" + "equalIndicator/v1": "72011a249f45d866080876c5f57efc4221be20c11e5a9a1311e6ef49bdb478a0" }, "properties": { "ideaSeverity": "ERROR" @@ -16295,16 +16648,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/samplers.py", + "uri": "minerva/models/__init__.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 11307, + "charLength": 3848, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
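`get_greater_bbox` converts the pixel radius `r` into CRS units by multiplying it by the patch's ground distance per pixel, i.e. `r_in_crs = r * (bbox.maxx - bbox.minx) / size`. A worked check with round numbers (all concrete values are invented for illustration):

```python
from torchgeo.datasets.utils import BoundingBox

from minerva.samplers import get_greater_bbox

# A 256 px patch spanning 2560 CRS units, i.e. 10 CRS units per pixel.
patch = BoundingBox(0.0, 2560.0, 0.0, 2560.0, 0.0, 1.0)
padded = get_greater_bbox(patch, r=128.0, size=256)

# 128 px * 10 units/px = 1280 CRS units of padding on each spatial side.
assert (padded.minx, padded.maxx) == (-1280.0, 3840.0)
assert (padded.miny, padded.maxy) == (-1280.0, 3840.0)
```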
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module containing custom samplers for :mod:`torchgeo` datasets.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"RandomPairGeoSampler\",\n \"RandomPairBatchGeoSampler\",\n \"get_greater_bbox\",\n \"get_pair_bboxes\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport random\nfrom typing import Iterator, List, Optional, Sequence, Tuple, Union\n\nfrom torchgeo.datasets import GeoDataset\nfrom torchgeo.datasets.utils import BoundingBox\nfrom torchgeo.samplers import BatchGeoSampler, GeoSampler\nfrom torchgeo.samplers.utils import _to_tuple, get_random_bounding_box\n\nfrom minerva.utils import utils\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass RandomPairGeoSampler(GeoSampler):\n \"\"\"Samples geo-close pairs of elements from a region of interest randomly.\n\n An extension to :class:`~torchgeo.samplers.RandomGeoSampler` that supports paired sampling (i.e for GeoCLR).\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second :class:`float` for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n length (int): number of random samples to draw per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``).\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.length = length\n self.max_r = max_r\n self.hits = []\n for hit in self.index.intersection(tuple(self.roi), objects=True):\n bounds = BoundingBox(*hit.bounds) # type: ignore\n if (\n bounds.maxx - bounds.minx > self.size[1]\n and bounds.maxy - bounds.miny > self.size[0]\n ):\n self.hits.append(hit)\n\n def __iter__(self) -> Iterator[Tuple[BoundingBox, BoundingBox]]: # type: ignore[override]\n \"\"\"Return a pair of :class:`~torchgeo.datasets.utils.BoundingBox` indices of a dataset\n that are geospatially close.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Tuple of\n bounding boxes to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n # Choose a random tile.\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds)\n\n bbox_a, bbox_b = get_pair_bboxes(bounds, self.size, self.res, self.max_r)\n\n yield bbox_a, bbox_b\n\n def __len__(self) -> int:\n \"\"\"Return the number of samples in a single epoch.\n\n Returns:\n int: Length of the epoch.\n \"\"\"\n return self.length\n\n\nclass RandomPairBatchGeoSampler(BatchGeoSampler):\n \"\"\"Samples batches of pairs of elements from a region of interest randomly.\n\n This is particularly useful during training when you want to maximize the size of\n the dataset and return as many random :term:`patches` as possible.\n\n An extension to :class:`~torchgeo.samplers.RandomBatchGeoSampler` that supports\n paired sampling (i.e. for GeoCLR) and ability to samples from multiple tiles per batch\n to increase variance of batch.\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second *float* for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n batch_size (int): Number of samples per batch.\n length (int): Number of samples per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``)\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n tiles_per_batch (int): Optional; Number of tiles to sample from per batch.\n Must be a multiple of ``batch_size``.\n\n Raises:\n ValueError: If ``tiles_per_batch`` is not a multiple of ``batch_size``.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n batch_size: int,\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n tiles_per_batch: int = 4,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.batch_size = batch_size\n self.length = length\n self.max_r = max_r\n self.hits = list(self.index.intersection(tuple(self.roi), objects=True))\n\n self.tiles_per_batch = tiles_per_batch\n\n if self.batch_size % tiles_per_batch == 0:\n self.sam_per_tile = self.batch_size // tiles_per_batch\n else:\n raise ValueError(f\"{tiles_per_batch=} is not a multiple of {batch_size=}\")\n\n def __iter__(self) -> Iterator[List[Tuple[BoundingBox, BoundingBox]]]: # type: ignore[override]\n \"\"\"Return the indices of a dataset.\n\n Returns:\n Batch of paired :class:`~torchgeo.datasets.utils.BoundingBox` to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n batch = []\n for _ in range(self.tiles_per_batch):\n # Choose a random tile\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds) # type: ignore\n\n # Choose random indices within that tile\n for _ in range(self.sam_per_tile):\n bbox_a, bbox_b = get_pair_bboxes(\n bounds, self.size, self.res, self.max_r\n )\n batch.append((bbox_a, bbox_b))\n\n yield batch\n\n def __len__(self) -> int:\n \"\"\"Return the number of batches in a single epoch.\n\n Returns:\n int: Number of batches in an epoch\n \"\"\"\n return self.length // self.batch_size\n\n\ndef get_greater_bbox(\n bbox: BoundingBox, r: float, size: Union[float, int, Sequence[float]]\n) -> BoundingBox:\n \"\"\"Return a bounding box at ``r`` distance around the first box.\n\n Args:\n bbox (~torchgeo.datasets.utils.BoundingBox): Bounding box of the original sample.\n r (float): Distance in pixels to extend the original bounding box by\n to get a new greater bounds to sample from.\n size (float | ~typing.Sequence[float]): The (``x``, ``y``) size of the :term:`patch` that ``bbox``\n represents in pixels. 
Will only use size[0] if a :class:`~typing.Sequence`.\n\n Returns:\n ~torchgeo.datasets.utils.BoundingBox: Greater bounds around original bounding box to sample from.\n \"\"\"\n x: float\n if isinstance(size, Sequence):\n assert isinstance(size, Sequence)\n x = float(size[0])\n else:\n assert isinstance(size, (float, int))\n x = float(size)\n\n # Calculates the geospatial distance to add to the existing bounding box to get\n # the box to sample the other side of the pair from.\n r_in_crs = r * abs(bbox.maxx - bbox.minx) / float(x)\n\n return BoundingBox(\n bbox.minx - r_in_crs,\n bbox.maxx + r_in_crs,\n bbox.miny - r_in_crs,\n bbox.maxy + r_in_crs,\n bbox.mint,\n bbox.maxt,\n )\n\n\ndef get_pair_bboxes(\n bounds: BoundingBox,\n size: Union[Tuple[float, float], float],\n res: float,\n max_r: float,\n) -> Tuple[BoundingBox, BoundingBox]:\n \"\"\"Samples a pair of bounding boxes geo-spatially close to each other.\n\n Args:\n bounds (~torchgeo.datasets.utils.BoundingBox): Maximum bounds of the :term:`tile` to sample pair from.\n size (tuple[float, float] | float): Size of each :term:`patch`.\n res (float): Resolution to sample :term:`patch` at.\n max_r (float): Padding around original :term:`patch` to sample new :term:`patch` from.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Pair of bounding boxes\n to sample pair of patches from dataset.\n \"\"\"\n # Choose a random index within that tile.\n bbox_a = get_random_bounding_box(bounds, size, res)\n\n max_bounds = get_greater_bbox(bbox_a, max_r, size)\n\n # Check that the new bbox cannot exceed the bounds of the tile.\n max_bounds = utils.check_within_bounds(max_bounds, bounds)\n\n # Randomly sample another box at a max distance of max_r from box_a.\n bbox_b = get_random_bounding_box(max_bounds, size, res)\n\n return bbox_a, bbox_b\n" + "text": "# -*- coding: utf-8 -*-\n# flake8: noqa: F401\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
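One detail worth flagging in `RandomPairBatchGeoSampler` as quoted above: the guard is `batch_size % tiles_per_batch == 0`, so it is `batch_size` that must be a multiple of `tiles_per_batch`, not the reverse as both the docstring and the `ValueError` message phrase it. The batch arithmetic itself is simple:

```python
batch_size, tiles_per_batch, length = 16, 4, 1600

assert batch_size % tiles_per_batch == 0       # 16 is a multiple of 4: accepted
samples_per_tile = batch_size // tiles_per_batch
batches_per_epoch = length // batch_size

print(samples_per_tile)   # 4 pairs drawn from each of the 4 tiles per batch
print(batches_per_epoch)  # 100, matching RandomPairBatchGeoSampler.__len__
```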
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\":mod:`models` contains several types of models designed to work within :mod:`minerva`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom .__depreciated import CNN as CNN\nfrom .__depreciated import MLP as MLP\nfrom .core import MinervaBackbone as MinervaBackbone\nfrom .core import MinervaDataParallel as MinervaDataParallel\nfrom .core import MinervaModel as MinervaModel\nfrom .core import MinervaOnnxModel as MinervaOnnxModel\nfrom .core import MinervaWrapper as MinervaWrapper\nfrom .core import bilinear_init as bilinear_init\nfrom .core import get_output_shape as get_output_shape\nfrom .core import get_torch_weights as get_torch_weights\nfrom .fcn import FCN8ResNet18 as FCN8ResNet18\nfrom .fcn import FCN8ResNet34 as FCN8ResNet34\nfrom .fcn import FCN8ResNet50 as FCN8ResNet50\nfrom .fcn import FCN8ResNet101 as FCN8ResNet101\nfrom .fcn import FCN8ResNet152 as FCN8ResNet152\nfrom .fcn import FCN16ResNet18 as FCN16ResNet18\nfrom .fcn import FCN16ResNet34 as FCN16ResNet34\nfrom .fcn import FCN16ResNet50 as FCN16ResNet50\nfrom .fcn import FCN32ResNet18 as FCN32ResNet18\nfrom .fcn import FCN32ResNet34 as FCN32ResNet34\nfrom .fcn import FCN32ResNet50 as FCN32ResNet50\nfrom .resnet import ResNet18 as ResNet18\nfrom .resnet import ResNet34 as ResNet34\nfrom .resnet import ResNet50 as ResNet50\nfrom .resnet import ResNet101 as ResNet101\nfrom .resnet import ResNet152 as ResNet152\nfrom .siamese import MinervaSiamese as MinervaSiamese\nfrom .siamese import SimCLR18 as SimCLR18\nfrom .siamese import SimCLR34 as SimCLR34\nfrom .siamese import SimCLR50 as SimCLR50\nfrom .siamese import SimSiam18 as SimSiam18\nfrom .siamese import SimSiam34 as SimSiam34\nfrom .siamese import SimSiam50 as SimSiam50\nfrom .unet import UNet as UNet\nfrom .unet import UNetR18 as UNetR18\nfrom .unet import UNetR34 as UNetR34\nfrom .unet import UNetR50 as UNetR50\nfrom .unet import UNetR101 as UNetR101\nfrom .unet import UNetR152 as UNetR152\n" }, "sourceLanguage": "Python" }, @@ -16312,9 +16665,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 11307, + "charLength": 3848, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the 
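The new `minerva/models/__init__.py` shown above re-exports every model class at package level (hence the `flake8: noqa: F401` pragma suppressing unused-import warnings), so downstream code never needs to know which submodule defines an architecture:

```python
# All of these resolve through the package root, per the re-exports above.
from minerva.models import FCN8ResNet18    # defined in minerva/models/fcn.py
from minerva.models import SimCLR18        # defined in minerva/models/siamese.py
from minerva.models import MinervaWrapper  # defined in minerva/models/core.py
```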
implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module containing custom samplers for :mod:`torchgeo` datasets.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"RandomPairGeoSampler\",\n \"RandomPairBatchGeoSampler\",\n \"get_greater_bbox\",\n \"get_pair_bboxes\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport random\nfrom typing import Iterator, List, Optional, Sequence, Tuple, Union\n\nfrom torchgeo.datasets import GeoDataset\nfrom torchgeo.datasets.utils import BoundingBox\nfrom torchgeo.samplers import BatchGeoSampler, GeoSampler\nfrom torchgeo.samplers.utils import _to_tuple, get_random_bounding_box\n\nfrom minerva.utils import utils\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass RandomPairGeoSampler(GeoSampler):\n \"\"\"Samples geo-close pairs of elements from a region of interest randomly.\n\n An extension to :class:`~torchgeo.samplers.RandomGeoSampler` that supports paired sampling (i.e for GeoCLR).\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second :class:`float` for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n length (int): number of random samples to draw per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``).\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.length = length\n self.max_r = max_r\n self.hits = []\n for hit in self.index.intersection(tuple(self.roi), objects=True):\n bounds = BoundingBox(*hit.bounds) # type: ignore\n if (\n bounds.maxx - bounds.minx > self.size[1]\n and bounds.maxy - bounds.miny > self.size[0]\n ):\n self.hits.append(hit)\n\n def __iter__(self) -> Iterator[Tuple[BoundingBox, BoundingBox]]: # type: ignore[override]\n \"\"\"Return a pair of :class:`~torchgeo.datasets.utils.BoundingBox` indices of a dataset\n that are geospatially close.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Tuple of\n bounding boxes to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n # Choose a random tile.\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds)\n\n bbox_a, bbox_b = get_pair_bboxes(bounds, self.size, self.res, self.max_r)\n\n yield bbox_a, bbox_b\n\n def __len__(self) -> int:\n \"\"\"Return the number of samples in a single epoch.\n\n Returns:\n int: Length of the epoch.\n \"\"\"\n return self.length\n\n\nclass RandomPairBatchGeoSampler(BatchGeoSampler):\n \"\"\"Samples batches of pairs of elements from a region of interest randomly.\n\n This is particularly useful during training when you want to maximize the size of\n the dataset and return as many random :term:`patches` as possible.\n\n An extension to :class:`~torchgeo.samplers.RandomBatchGeoSampler` that supports\n paired sampling (i.e. for GeoCLR) and ability to samples from multiple tiles per batch\n to increase variance of batch.\n\n .. note::\n The ``size`` argument can either be:\n\n * a single :class:`float` - in which case the same value is used for the height and\n width dimension\n * a :class:`tuple` of two floats - in which case, the first :class:`float` is used for the\n height dimension, and the second *float* for the width dimension\n\n Args:\n dataset (~torchgeo.datasets.GeoDataset): Dataset to index from.\n size (tuple[float, float] | float): Dimensions of each :term:`patch` in units of CRS.\n batch_size (int): Number of samples per batch.\n length (int): Number of samples per epoch.\n roi (~torchgeo.datasets.utils.BoundingBox): Optional; Region of interest to sample from\n (``minx``, ``maxx``, ``miny``, ``maxy``, ``mint``, ``maxt``). 
(defaults to the bounds of ``dataset.index``)\n max_r (float): Optional; Maximum geo-spatial distance (from centre to centre)\n to sample matching sample from.\n tiles_per_batch (int): Optional; Number of tiles to sample from per batch.\n Must be a multiple of ``batch_size``.\n\n Raises:\n ValueError: If ``tiles_per_batch`` is not a multiple of ``batch_size``.\n \"\"\"\n\n def __init__(\n self,\n dataset: GeoDataset,\n size: Union[Tuple[float, float], float],\n batch_size: int,\n length: int,\n roi: Optional[BoundingBox] = None,\n max_r: float = 256.0,\n tiles_per_batch: int = 4,\n ) -> None:\n super().__init__(dataset, roi)\n self.size = _to_tuple(size)\n self.batch_size = batch_size\n self.length = length\n self.max_r = max_r\n self.hits = list(self.index.intersection(tuple(self.roi), objects=True))\n\n self.tiles_per_batch = tiles_per_batch\n\n if self.batch_size % tiles_per_batch == 0:\n self.sam_per_tile = self.batch_size // tiles_per_batch\n else:\n raise ValueError(f\"{tiles_per_batch=} is not a multiple of {batch_size=}\")\n\n def __iter__(self) -> Iterator[List[Tuple[BoundingBox, BoundingBox]]]: # type: ignore[override]\n \"\"\"Return the indices of a dataset.\n\n Returns:\n Batch of paired :class:`~torchgeo.datasets.utils.BoundingBox` to index a dataset.\n \"\"\"\n for _ in range(len(self)):\n batch = []\n for _ in range(self.tiles_per_batch):\n # Choose a random tile\n hit = random.choice(self.hits)\n bounds = BoundingBox(*hit.bounds) # type: ignore\n\n # Choose random indices within that tile\n for _ in range(self.sam_per_tile):\n bbox_a, bbox_b = get_pair_bboxes(\n bounds, self.size, self.res, self.max_r\n )\n batch.append((bbox_a, bbox_b))\n\n yield batch\n\n def __len__(self) -> int:\n \"\"\"Return the number of batches in a single epoch.\n\n Returns:\n int: Number of batches in an epoch\n \"\"\"\n return self.length // self.batch_size\n\n\ndef get_greater_bbox(\n bbox: BoundingBox, r: float, size: Union[float, int, Sequence[float]]\n) -> BoundingBox:\n \"\"\"Return a bounding box at ``r`` distance around the first box.\n\n Args:\n bbox (~torchgeo.datasets.utils.BoundingBox): Bounding box of the original sample.\n r (float): Distance in pixels to extend the original bounding box by\n to get a new greater bounds to sample from.\n size (float | ~typing.Sequence[float]): The (``x``, ``y``) size of the :term:`patch` that ``bbox``\n represents in pixels. 
Will only use size[0] if a :class:`~typing.Sequence`.\n\n Returns:\n ~torchgeo.datasets.utils.BoundingBox: Greater bounds around original bounding box to sample from.\n \"\"\"\n x: float\n if isinstance(size, Sequence):\n assert isinstance(size, Sequence)\n x = float(size[0])\n else:\n assert isinstance(size, (float, int))\n x = float(size)\n\n # Calculates the geospatial distance to add to the existing bounding box to get\n # the box to sample the other side of the pair from.\n r_in_crs = r * abs(bbox.maxx - bbox.minx) / float(x)\n\n return BoundingBox(\n bbox.minx - r_in_crs,\n bbox.maxx + r_in_crs,\n bbox.miny - r_in_crs,\n bbox.maxy + r_in_crs,\n bbox.mint,\n bbox.maxt,\n )\n\n\ndef get_pair_bboxes(\n bounds: BoundingBox,\n size: Union[Tuple[float, float], float],\n res: float,\n max_r: float,\n) -> Tuple[BoundingBox, BoundingBox]:\n \"\"\"Samples a pair of bounding boxes geo-spatially close to each other.\n\n Args:\n bounds (~torchgeo.datasets.utils.BoundingBox): Maximum bounds of the :term:`tile` to sample pair from.\n size (tuple[float, float] | float): Size of each :term:`patch`.\n res (float): Resolution to sample :term:`patch` at.\n max_r (float): Padding around original :term:`patch` to sample new :term:`patch` from.\n\n Returns:\n tuple[~torchgeo.datasets.utils.BoundingBox, ~torchgeo.datasets.utils.BoundingBox]: Pair of bounding boxes\n to sample pair of patches from dataset.\n \"\"\"\n # Choose a random index within that tile.\n bbox_a = get_random_bounding_box(bounds, size, res)\n\n max_bounds = get_greater_bbox(bbox_a, max_r, size)\n\n # Check that the new bbox cannot exceed the bounds of the tile.\n max_bounds = utils.check_within_bounds(max_bounds, bounds)\n\n # Randomly sample another box at a max distance of max_r from box_a.\n bbox_b = get_random_bounding_box(max_bounds, size, res)\n\n return bbox_a, bbox_b\n" + "text": "# -*- coding: utf-8 -*-\n# flake8: noqa: F401\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
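Taken together, `get_pair_bboxes` draws `bbox_a` uniformly from a tile, pads it with `get_greater_bbox`, clamps the padded region back inside the tile via `utils.check_within_bounds`, and then draws `bbox_b` inside that clamped region. A hedged end-to-end sketch (tile extent, resolution and `max_r` are all invented):

```python
from torchgeo.datasets.utils import BoundingBox

from minerva.samplers import get_pair_bboxes

tile = BoundingBox(0.0, 10240.0, 0.0, 10240.0, 0.0, 1.0)
bbox_a, bbox_b = get_pair_bboxes(tile, size=(2560.0, 2560.0), res=10.0, max_r=128.0)

# Both boxes fit inside the tile; bbox_b lies in the clamped
# neighbourhood of bbox_a, so the pair is geospatially close.
```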
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\":mod:`models` contains several types of models designed to work within :mod:`minerva`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom .__depreciated import CNN as CNN\nfrom .__depreciated import MLP as MLP\nfrom .core import MinervaBackbone as MinervaBackbone\nfrom .core import MinervaDataParallel as MinervaDataParallel\nfrom .core import MinervaModel as MinervaModel\nfrom .core import MinervaOnnxModel as MinervaOnnxModel\nfrom .core import MinervaWrapper as MinervaWrapper\nfrom .core import bilinear_init as bilinear_init\nfrom .core import get_output_shape as get_output_shape\nfrom .core import get_torch_weights as get_torch_weights\nfrom .fcn import FCN8ResNet18 as FCN8ResNet18\nfrom .fcn import FCN8ResNet34 as FCN8ResNet34\nfrom .fcn import FCN8ResNet50 as FCN8ResNet50\nfrom .fcn import FCN8ResNet101 as FCN8ResNet101\nfrom .fcn import FCN8ResNet152 as FCN8ResNet152\nfrom .fcn import FCN16ResNet18 as FCN16ResNet18\nfrom .fcn import FCN16ResNet34 as FCN16ResNet34\nfrom .fcn import FCN16ResNet50 as FCN16ResNet50\nfrom .fcn import FCN32ResNet18 as FCN32ResNet18\nfrom .fcn import FCN32ResNet34 as FCN32ResNet34\nfrom .fcn import FCN32ResNet50 as FCN32ResNet50\nfrom .resnet import ResNet18 as ResNet18\nfrom .resnet import ResNet34 as ResNet34\nfrom .resnet import ResNet50 as ResNet50\nfrom .resnet import ResNet101 as ResNet101\nfrom .resnet import ResNet152 as ResNet152\nfrom .siamese import MinervaSiamese as MinervaSiamese\nfrom .siamese import SimCLR18 as SimCLR18\nfrom .siamese import SimCLR34 as SimCLR34\nfrom .siamese import SimCLR50 as SimCLR50\nfrom .siamese import SimSiam18 as SimSiam18\nfrom .siamese import SimSiam34 as SimSiam34\nfrom .siamese import SimSiam50 as SimSiam50\nfrom .unet import UNet as UNet\nfrom .unet import UNetR18 as UNetR18\nfrom .unet import UNetR34 as UNetR34\nfrom .unet import UNetR50 as UNetR50\nfrom .unet import UNetR101 as UNetR101\nfrom .unet import UNetR152 as UNetR152\n" } } }, @@ -16327,7 +16680,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "0c70befaf06d4fbe73023edb06fd2ec81a886b9da218619e36f132fd324c0349" + "equalIndicator/v1": "efe0fbc867dbcce4dec21a51bcdfe496d8611a063dc751e0db07bbd4cf09a14b" }, "properties": { "ideaSeverity": "ERROR" @@ -16345,16 +16698,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/__init__.py", + "uri": "minerva/models/siamese.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 3453, + "charLength": 15972, "snippet": { - "text": "# -*- coding: utf-8 
-*-\n# flake8: noqa: F401\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\":mod:`models` contains several types of models designed to work within :mod:`minerva`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom .__depreciated import CNN as CNN\nfrom .__depreciated import MLP as MLP\nfrom .core import MinervaBackbone as MinervaBackbone\nfrom .core import MinervaDataParallel as MinervaDataParallel\nfrom .core import MinervaModel as MinervaModel\nfrom .core import MinervaOnnxModel as MinervaOnnxModel\nfrom .core import MinervaWrapper as MinervaWrapper\nfrom .core import bilinear_init as bilinear_init\nfrom .core import get_output_shape as get_output_shape\nfrom .core import get_torch_weights as get_torch_weights\nfrom .fcn import FCN8ResNet18 as FCN8ResNet18\nfrom .fcn import FCN8ResNet34 as FCN8ResNet34\nfrom .fcn import FCN8ResNet50 as FCN8ResNet50\nfrom .fcn import FCN8ResNet101 as FCN8ResNet101\nfrom .fcn import FCN8ResNet152 as FCN8ResNet152\nfrom .fcn import FCN16ResNet18 as FCN16ResNet18\nfrom .fcn import FCN16ResNet34 as FCN16ResNet34\nfrom .fcn import FCN16ResNet50 as FCN16ResNet50\nfrom .fcn import FCN32ResNet18 as FCN32ResNet18\nfrom .fcn import FCN32ResNet34 as FCN32ResNet34\nfrom .fcn import FCN32ResNet50 as FCN32ResNet50\nfrom .resnet import ResNet18 as ResNet18\nfrom .resnet import ResNet34 as ResNet34\nfrom .resnet import ResNet50 as ResNet50\nfrom .resnet import ResNet101 as ResNet101\nfrom .resnet import ResNet152 as ResNet152\nfrom .siamese import MinervaSiamese as MinervaSiamese\nfrom .siamese import SimCLR18 as SimCLR18\nfrom .siamese import SimCLR34 as SimCLR34\nfrom .siamese import SimCLR50 as SimCLR50\nfrom .siamese import SimSiam18 as SimSiam18\nfrom .siamese import SimSiam34 as SimSiam34\nfrom .siamese import SimSiam50 as SimSiam50\nfrom .unet import UNet as UNet\nfrom .unet import UNetR18 as UNetR18\nfrom .unet import UNetR34 as UNetR34\nfrom .unet import UNetR50 as UNetR50\nfrom .unet import UNetR101 as UNetR101\nfrom .unet import UNetR152 as UNetR152\n" + "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of 
charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Siamese models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaSiamese\",\n \"SimCLR\",\n \"SimCLR18\",\n \"SimCLR34\",\n \"SimCLR50\",\n \"SimSiam\",\n \"SimSiam18\",\n \"SimSiam34\",\n \"SimSiam50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom typing import Any, Dict, Sequence, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom .core import MinervaBackbone, MinervaModel, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaSiamese(MinervaBackbone):\n \"\"\"Abstract class for Siamese models.\n\n Attributes:\n backbone (MinervaModel): The backbone encoder for the Siamese model.\n proj_head (~torch.nn.Module): The projection head for re-projecting the outputs\n from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.backbone: MinervaModel\n self.proj_head: Module\n\n def forward(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Can be called directly as a method (e.g. ``model.forward()``) or when\n data is parsed to model (e.g. 
``model()``).\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector ``A``.\n * Output feature vector ``B``.\n * Detached embedding, ``A``, from the :attr:`~MinervaSiamese.backbone`.\n * Detached embedding, ``B``, from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n return self.forward_pair(x)\n\n def forward_pair(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector A.\n * Output feature vector B.\n * Embedding, A, from the backbone.\n * Embedding, B, from the backbone.\n \"\"\"\n g_a, f_a = self.forward_single(x[0])\n g_b, f_b = self.forward_single(x[1])\n\n g = torch.cat([g_a, g_b], dim=0) # type: ignore[attr-defined]\n\n assert isinstance(g, Tensor)\n\n return g, g_a, g_b, f_a, f_b\n\n @abc.abstractmethod\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the backbone\n and feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the projection head\n and the detached embedding vector from the backbone.\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n\nclass SimCLR(MinervaSiamese):\n \"\"\"Base SimCLR class to be subclassed by SimCLR variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimCLR.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimCLR that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from\n the :attr:`~SimCLR.backbone` encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the :attr:`~SimCLR.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimCLR, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n self.proj_head = nn.Sequential(\n nn.Linear(np.prod(backbone_out_shape), 512, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Linear(512, feature_dim, bias=False),\n )\n\n def forward_single(self, x: 
Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the\n :attr:`~SimCLR.backbone` and feeding its output into the :attr:`~SimCLR.proj_head`.\n\n Overwrites :meth:`MinervaSiamese.forward_single`.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the\n :attr:`~SimCLR.proj_head` and the detached embedding vector from the :attr:`~SimCLR.backbone`.\n \"\"\"\n f: Tensor = torch.flatten(self.backbone(x)[0], start_dim=1)\n g: Tensor = self.proj_head(f)\n\n return g, f\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projections' logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n z, z_a, z_b, _, _ = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = self.criterion(z_a, z_b) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, z\n\n\nclass SimCLR18(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet18` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimCLR34(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet34` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimCLR50(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet50` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n\n\nclass SimSiam(MinervaSiamese):\n \"\"\"Base SimSiam class to be subclassed by SimSiam variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimSiam.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimSiam that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from the backbone encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the :attr:`~SimSiam.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n pred_dim: int = 512,\n 
backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimSiam, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n prev_dim = np.prod(backbone_out_shape)\n\n self.proj_head = nn.Sequential( # type: ignore[arg-type]\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # first layer\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # second layer\n nn.Linear(prev_dim, feature_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(feature_dim, affine=False),\n ) # output layer\n # self.proj_head[6].bias.requires_grad = False # hack: not use bias as it is followed by BN\n\n # Build a 2-layer predictor.\n self.predictor = nn.Sequential(\n nn.Linear(feature_dim, pred_dim, bias=False),\n nn.BatchNorm1d(pred_dim),\n nn.ReLU(inplace=True), # hidden layer\n nn.Linear(pred_dim, feature_dim),\n ) # output layer\n\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of :class:`SimSiam` by using the forward methods of the backbone\n and feeding its output into the :attr:`~SimSiam.proj_head`.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from :attr:`~SimSiam.proj_head`\n and the detached embedding vector from the :attr:`~SimSiam.backbone`.\n \"\"\"\n z: Tensor = self.proj_head(torch.flatten(self.backbone(x)[0], start_dim=1)) # type: ignore[attr-defined]\n\n p: Tensor = self.predictor(z)\n\n return p, z.detach()\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. 
If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projection's logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n p, p_a, p_b, z_a, z_b = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = 0.5 * (self.criterion(z_a, p_b) + self.criterion(z_b, p_a)) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, p\n\n\nclass SimSiam18(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet18` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimSiam34(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet34` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimSiam50(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet50` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n" }, "sourceLanguage": "Python" }, @@ -16362,9 +16715,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 3453, + "charLength": 15972, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# flake8: noqa: F401\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\":mod:`models` contains several types of models designed to work within :mod:`minerva`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom .__depreciated import CNN as CNN\nfrom .__depreciated import MLP as MLP\nfrom .core import MinervaBackbone as MinervaBackbone\nfrom .core import MinervaDataParallel as MinervaDataParallel\nfrom .core import MinervaModel as MinervaModel\nfrom .core import MinervaOnnxModel as MinervaOnnxModel\nfrom .core import MinervaWrapper as MinervaWrapper\nfrom .core import bilinear_init as bilinear_init\nfrom .core import get_output_shape as get_output_shape\nfrom .core import get_torch_weights as get_torch_weights\nfrom .fcn import FCN8ResNet18 as FCN8ResNet18\nfrom .fcn import FCN8ResNet34 as FCN8ResNet34\nfrom .fcn import FCN8ResNet50 as FCN8ResNet50\nfrom .fcn import FCN8ResNet101 as FCN8ResNet101\nfrom .fcn import FCN8ResNet152 as FCN8ResNet152\nfrom .fcn import FCN16ResNet18 as FCN16ResNet18\nfrom .fcn import FCN16ResNet34 as FCN16ResNet34\nfrom .fcn import FCN16ResNet50 as FCN16ResNet50\nfrom .fcn import FCN32ResNet18 as FCN32ResNet18\nfrom .fcn import FCN32ResNet34 as FCN32ResNet34\nfrom .fcn import FCN32ResNet50 as FCN32ResNet50\nfrom .resnet import ResNet18 as ResNet18\nfrom .resnet import ResNet34 as ResNet34\nfrom .resnet import ResNet50 as ResNet50\nfrom .resnet import ResNet101 as ResNet101\nfrom .resnet import ResNet152 as ResNet152\nfrom .siamese import MinervaSiamese as MinervaSiamese\nfrom .siamese import SimCLR18 as SimCLR18\nfrom .siamese import SimCLR34 as SimCLR34\nfrom .siamese import SimCLR50 as SimCLR50\nfrom .siamese import SimSiam18 as SimSiam18\nfrom .siamese import SimSiam34 as SimSiam34\nfrom .siamese import SimSiam50 as SimSiam50\nfrom .unet import UNet as UNet\nfrom .unet import UNetR18 as UNetR18\nfrom .unet import UNetR34 as UNetR34\nfrom .unet import UNetR50 as UNetR50\nfrom .unet import UNetR101 as UNetR101\nfrom .unet import UNetR152 as UNetR152\n" + "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF 
MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Siamese models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaSiamese\",\n \"SimCLR\",\n \"SimCLR18\",\n \"SimCLR34\",\n \"SimCLR50\",\n \"SimSiam\",\n \"SimSiam18\",\n \"SimSiam34\",\n \"SimSiam50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom typing import Any, Dict, Sequence, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom .core import MinervaBackbone, MinervaModel, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaSiamese(MinervaBackbone):\n \"\"\"Abstract class for Siamese models.\n\n Attributes:\n backbone (MinervaModel): The backbone encoder for the Siamese model.\n proj_head (~torch.nn.Module): The projection head for re-projecting the outputs\n from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.backbone: MinervaModel\n self.proj_head: Module\n\n def forward(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Can be called directly as a method (e.g. ``model.forward()``) or when\n data is parsed to model (e.g. 
``model()``).\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector ``A``.\n * Output feature vector ``B``.\n * Detached embedding, ``A``, from the :attr:`~MinervaSiamese.backbone`.\n * Detached embedding, ``B``, from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n return self.forward_pair(x)\n\n def forward_pair(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector A.\n * Output feature vector B.\n * Embedding, A, from the backbone.\n * Embedding, B, from the backbone.\n \"\"\"\n g_a, f_a = self.forward_single(x[0])\n g_b, f_b = self.forward_single(x[1])\n\n g = torch.cat([g_a, g_b], dim=0) # type: ignore[attr-defined]\n\n assert isinstance(g, Tensor)\n\n return g, g_a, g_b, f_a, f_b\n\n @abc.abstractmethod\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the backbone\n and feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the projection head\n and the detached embedding vector from the backbone.\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n\nclass SimCLR(MinervaSiamese):\n \"\"\"Base SimCLR class to be subclassed by SimCLR variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimCLR.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimCLR that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from\n the :attr:`~SimCLR.backbone` encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the :attr:`~SimCLR.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimCLR, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n self.proj_head = nn.Sequential(\n nn.Linear(np.prod(backbone_out_shape), 512, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Linear(512, feature_dim, bias=False),\n )\n\n def forward_single(self, x: 
Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the\n :attr:`~SimCLR.backbone` and feeding its output into the :attr:`~SimCLR.proj_head`.\n\n Overwrites :meth:`MinervaSiamese.forward_single`.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the\n :attr:`~SimCLR.proj_head` and the detached embedding vector from the :attr:`~SimCLR.backbone`.\n \"\"\"\n f: Tensor = torch.flatten(self.backbone(x)[0], start_dim=1)\n g: Tensor = self.proj_head(f)\n\n return g, f\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projections' logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n z, z_a, z_b, _, _ = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = self.criterion(z_a, z_b) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, z\n\n\nclass SimCLR18(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet18` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimCLR34(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet34` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimCLR50(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet50` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n\n\nclass SimSiam(MinervaSiamese):\n \"\"\"Base SimSiam class to be subclassed by SimSiam variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimSiam.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimSiam that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from the backbone encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the :attr:`~SimSiam.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n pred_dim: int = 512,\n 
backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimSiam, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n prev_dim = np.prod(backbone_out_shape)\n\n self.proj_head = nn.Sequential( # type: ignore[arg-type]\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # first layer\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # second layer\n nn.Linear(prev_dim, feature_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(feature_dim, affine=False),\n ) # output layer\n # self.proj_head[6].bias.requires_grad = False # hack: not use bias as it is followed by BN\n\n # Build a 2-layer predictor.\n self.predictor = nn.Sequential(\n nn.Linear(feature_dim, pred_dim, bias=False),\n nn.BatchNorm1d(pred_dim),\n nn.ReLU(inplace=True), # hidden layer\n nn.Linear(pred_dim, feature_dim),\n ) # output layer\n\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of :class:`SimSiam` by using the forward methods of the backbone\n and feeding its output into the :attr:`~SimSiam.proj_head`.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from :attr:`~SimSiam.proj_head`\n and the detached embedding vector from the :attr:`~SimSiam.backbone`.\n \"\"\"\n z: Tensor = self.proj_head(torch.flatten(self.backbone(x)[0], start_dim=1)) # type: ignore[attr-defined]\n\n p: Tensor = self.predictor(z)\n\n return p, z.detach()\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. 
If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projection's logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n p, p_a, p_b, z_a, z_b = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = 0.5 * (self.criterion(z_a, p_b) + self.criterion(z_b, p_a)) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, p\n\n\nclass SimSiam18(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet18` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimSiam34(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet34` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimSiam50(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet50` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n" } } }, @@ -16377,7 +16730,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "42db9c863020de285479fd4e9acbd5bc844072d22a2a30dbff881e94de4310cc" + "equalIndicator/v1": "bc89519c58841dfdba376160bec91be311fadf4263d4f3ec003380f7a5c37bc9" }, "properties": { "ideaSeverity": "ERROR" @@ -16395,16 +16748,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/MinervaPipe.py", + "uri": "minerva/logger.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 3215, + "charLength": 25928, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to handle the pre-training of a model and its subsequent downstream task fine-tuning.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport subprocess\nimport sys\nfrom typing import Any, Dict\n\nimport yaml\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(config_path: str):\n with open(config_path) as f:\n config: Dict[str, Any] = yaml.safe_load(f)\n\n for key in config.keys():\n print(\n f\"\\nExecuting {key} experiment + =====================================================================\"\n )\n\n try:\n exit_code = subprocess.Popen( # nosec B602\n f\"python MinervaExp.py -c {config[key]}\",\n shell=True,\n ).wait()\n\n if exit_code != 0:\n raise SystemExit()\n except KeyboardInterrupt as err:\n print(f\"{err}: Skipping to next experiment...\")\n\n except SystemExit as err:\n print(err)\n print(f\"Error in {key} experiment -> ABORT\")\n sys.exit(exit_code) # type: ignore\n\n print(\n f\"\\n{key} experiment COMPLETE + =====================================================================\"\n )\n\n print(\"\\nPipeline COMPLETE\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config_path\", type=str)\n args = parser.parse_args()\n\n main(config_path=args.config_path)\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle the logging of results from various model types.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\nfrom __future__ import annotations\n\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaLogger\",\n \"STGLogger\",\n \"SSLLogger\",\n \"KNNLogger\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nimport math\nfrom abc import ABC\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Optional,\n SupportsFloat,\n Tuple,\n Union,\n)\n\nimport mlflow\nimport numpy as np\nimport torch\nfrom sklearn.metrics import jaccard_score\nfrom torch import Tensor\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.utils.tensorboard.writer import SummaryWriter\n\nfrom torchgeo.datasets.utils import BoundingBox\nfrom wandb.sdk.wandb_run import Run\n\nfrom minerva.utils import utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n_tensorflow_exist = utils.check_optional_import_exist(\"tensorflow\")\nTENSORBOARD_WRITER: Optional[Callable[..., Any]]\ntry:\n TENSORBOARD_WRITER = utils._optional_import(\n \"torch.utils.tensorboard.writer\",\n name=\"SummaryWriter\",\n package=\"tensorflow\",\n )\nexcept ImportError as err: # pragma: no cover\n print(err)\n print(\"Disabling TensorBoard logging\")\n TENSORBOARD_WRITER = None\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaLogger(ABC):\n \"\"\"Base abstract class for all :mod:`minerva` logger classes to ensure intercompatibility with\n :class:`~trainer.Trainer`.\n\n Attributes:\n record_int (bool): Whether to record the integer values from an epoch of model fitting.\n record_float (bool): Whether to record the floating point values from an epoch of model fitting.\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n logs (dict[str, ~typing.Any]): Dictionary to hold the logs from the epoch.\n Logs should be more lightweight than ``results``.\n results (dict[str, ~typing.Any]): Dictionary to hold the results from the epoch.\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n record_int (bool): 
Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wandb_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n record_int: bool = True,\n record_float: bool = False,\n writer: Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super(MinervaLogger, self).__init__()\n self.record_int = record_int\n self.record_float = record_float\n self.n_batches = n_batches\n self.batch_size = batch_size\n self.n_samples = n_samples\n self.writer = writer\n\n self.logs: Dict[str, Any] = {}\n self.results: Dict[str, Any] = {}\n\n def __call__(self, mode: str, step_num: int, loss: Tensor, *args) -> None:\n \"\"\"Call :meth:`log`.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n\n Returns:\n None\n \"\"\"\n self.log(mode, step_num, loss, *args)\n\n @abc.abstractmethod\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Abstract logging method, the core functionality of a logger. Must be overwritten.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n z (~torch.Tensor): Optional; Output tensor from the model.\n y (~torch.Tensor): Optional; Labels to assess model output against.\n bbox (~torchgeo.datasets.utils.BoundingBox): Optional; Bounding boxes of the input samples.\n\n Returns:\n None\n \"\"\"\n pass # pragma: no cover\n\n def write_metric(\n self, mode: str, key: str, value: SupportsFloat, step_num: Optional[int] = None\n ):\n \"\"\"Write metric values to logging backends after calculation.\n\n Args:\n mode (str): Mode of model fitting.\n key (str): Key for the metric that ``value`` belongs to.\n value (SupportsFloat): Metric to write to logger.\n step_num (int): Optional; Global step number for this ``mode`` of fitting.\n\n \"\"\"\n # TODO: Are values being reduced across nodes / logged from rank 0?\n if self.writer:\n if _tensorflow_exist:\n if (\n isinstance(\n self.writer, utils.extract_class_type(TENSORBOARD_WRITER)\n )\n and self.writer\n ):\n self.writer.add_scalar( # type: ignore[attr-defined]\n tag=f\"{mode}_{key}\",\n scalar_value=value, # type: ignore[attr-defined]\n global_step=step_num,\n )\n if isinstance(self.writer, Run):\n self.writer.log({f\"{mode}/step\": step_num, f\"{mode}/{key}\": value})\n\n if mlflow.active_run():\n # If running in Azure Machine Learning, tracking URI / experiment ID set already\n # https://learn.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow-cli-runs?tabs=python%2Cmlflow#creating-a-training-routine # noqa: E501\n mlflow.log_metric(key, value) # pragma: no cover\n\n @property\n def get_logs(self) -> Dict[str, Any]:\n \"\"\"Gets the logs dictionary.\n\n Returns:\n dict[str, ~typing.Any]: Log dictionary of the logger.\n \"\"\"\n return self.logs\n\n @property\n def 
get_results(self) -> Dict[str, Any]:\n \"\"\"Gets the results dictionary.\n\n Returns:\n dict[str, ~typing.Any]: Results dictionary of the logger.\n \"\"\"\n return self.results\n\n\nclass STGLogger(MinervaLogger):\n \"\"\"Logger designed for supervised learning using :mod:`torchgeo` datasets.\n\n Attributes:\n logs (dict[str, ~typing.Any]): The main logs from an epoch of fitting with these metrics:\n\n * ``batch_num``\n * ``total_loss``\n * ``total_correct``\n\n results (dict[str, ~typing.Any]): Holds these additional, full results from the epoch:\n\n * ``y``\n * ``z``\n * ``probs``\n * ``ids``\n * ``bounds``\n\n calc_miou (bool): Activates the calculating and logging of :term:`MIoU` for segmentation models.\n Places the metric in the ``total_miou`` key of ``logs``.\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n out_shape (int | tuple[int, ...]): Shape of the model output.\n n_classes (int): Number of classes in dataset.\n record_int (bool): Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wandb_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n\n Raises:\n MemoryError: If trying to allocate memory to hold the probabilities of predictions\n from the model exceeds capacity.\n MemoryError: If trying to allocate memory to hold the bounding boxes of samples exceeds capacity.\n \"\"\"\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n out_shape: Union[int, Tuple[int, ...]],\n n_classes: int,\n record_int: bool = True,\n record_float: bool = False,\n writer: Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super(STGLogger, self).__init__(\n n_batches,\n batch_size,\n n_samples,\n record_int,\n record_float,\n writer,\n )\n _out_shape: Tuple[int, ...]\n\n if isinstance(out_shape, int):\n _out_shape = (out_shape,)\n else:\n _out_shape = out_shape\n\n self.logs: Dict[str, Any] = {\n \"batch_num\": 0,\n \"total_loss\": 0.0,\n \"total_correct\": 0.0,\n }\n\n self.results: Dict[str, Any] = {\n \"y\": None,\n \"z\": None,\n \"probs\": None,\n \"ids\": [],\n \"bounds\": None,\n }\n self.calc_miou = True if kwargs.get(\"model_type\") == \"segmentation\" else False\n\n if self.calc_miou:\n self.logs[\"total_miou\"] = 0.0\n\n # Allocate memory for the integer values to be recorded.\n if self.record_int:\n int_log_shape: Tuple[int, ...]\n if kwargs.get(\"model_type\") == \"scene classifier\":\n int_log_shape = (self.n_batches, self.batch_size)\n else:\n int_log_shape = (self.n_batches, self.batch_size, *_out_shape)\n\n self.results[\"z\"] = np.empty(int_log_shape, dtype=np.uint8)\n self.results[\"y\"] = np.empty(int_log_shape, dtype=np.uint8)\n\n # Allocate memory for the floating point values to be recorded.\n if self.record_float:\n float_log_shape: Tuple[int, ...]\n if kwargs.get(\"model_type\") == \"scene classifier\":\n float_log_shape = (self.n_batches, self.batch_size, n_classes)\n else:\n float_log_shape = (\n self.n_batches,\n self.batch_size,\n n_classes,\n *_out_shape,\n )\n\n try:\n self.results[\"probs\"] = np.empty(float_log_shape, dtype=np.float16)\n except MemoryError: # pragma: no 
cover\n raise MemoryError(\n \"Dataset too large to record probabilities of predicted classes!\"\n )\n\n try:\n self.results[\"bounds\"] = np.empty(\n (self.n_batches, self.batch_size), dtype=object\n )\n except MemoryError: # pragma: no cover\n raise MemoryError(\n \"Dataset too large to record bounding boxes of samples!\"\n )\n\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Logs the outputs and results from a step of model fitting. Overwrites abstract method.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n z (~torch.Tensor): Output tensor from the model.\n y (~torch.Tensor): Labels to assess model output against.\n bbox (~torchgeo.datasets.utils.BoundingBox): Bounding boxes of the input samples.\n\n Returns:\n None\n \"\"\"\n\n assert z is not None\n assert y is not None\n\n if self.record_int:\n # Arg max the estimated probabilities and add to predictions.\n self.results[\"z\"][self.logs[\"batch_num\"]] = torch.argmax(z, 1).cpu().numpy() # type: ignore[attr-defined]\n\n # Add the labels and sample IDs to lists.\n self.results[\"y\"][self.logs[\"batch_num\"]] = y.cpu().numpy()\n batch_ids = []\n for i in range(\n self.logs[\"batch_num\"] * self.batch_size,\n (self.logs[\"batch_num\"] + 1) * self.batch_size,\n ):\n batch_ids.append(str(i).zfill(len(str(self.n_samples))))\n self.results[\"ids\"].append(batch_ids)\n\n if self.record_float:\n assert bbox is not None\n # Add the estimated probabilities to probs.\n self.results[\"probs\"][self.logs[\"batch_num\"]] = z.detach().cpu().numpy()\n self.results[\"bounds\"][self.logs[\"batch_num\"]] = bbox\n\n # Computes the loss and the correct predictions from this step.\n ls = loss.item()\n correct = (torch.argmax(z, 1) == y).sum().item() # type: ignore[attr-defined]\n\n # Adds loss and correct predictions to logs.\n self.logs[\"total_loss\"] += ls\n self.logs[\"total_correct\"] += correct\n\n if self.calc_miou:\n assert y is not None\n y_true = y.detach().cpu().numpy()\n y_pred = torch.argmax(z, 1).detach().cpu().numpy() # type: ignore[attr-defined]\n miou = 0.0\n for i in range(len(y)):\n miou += float(\n jaccard_score(\n y_true[i].flatten(), y_pred[i].flatten(), average=\"macro\"\n )\n ) # noqa: E501 type: ignore[attr-defined]\n self.logs[\"total_miou\"] += miou\n\n self.write_metric(mode, \"miou\", miou / len(y), step_num=step_num)\n\n # Writes loss and correct predictions to the writer.\n self.write_metric(mode, \"loss\", ls, step_num=step_num)\n self.write_metric(\n mode, \"acc\", correct / len(torch.flatten(y)), step_num=step_num\n )\n\n # Adds 1 to batch number (step number).\n self.logs[\"batch_num\"] += 1\n\n\nclass KNNLogger(MinervaLogger):\n \"\"\"Logger specifically designed for use with the KNN validation in\n :meth:`trainer.Trainer.weighted_knn_validation`.\n\n Attributes:\n logs (dict[str, ~typing.Any]): The main logs from the KNN with these metrics:\n\n * ``batch_num``\n * ``total_loss``\n * ``total_correct``\n * ``total_top5``\n\n results (dict[str, ~typing.Any]): Hold these additional, full results from the KNN:\n\n * ``y``\n * ``z``\n * ``probs``\n * ``ids``\n * ``bounds``\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n 
record_int (bool): Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wand_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n \"\"\"\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n record_int: bool = True,\n record_float: bool = False,\n writer: Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super().__init__(\n n_batches, batch_size, n_samples, record_int, record_float, writer, **kwargs\n )\n\n self.logs: Dict[str, Any] = {\n \"batch_num\": 0,\n \"total_loss\": 0.0,\n \"total_correct\": 0.0,\n \"total_top5\": 0.0,\n }\n\n self.results: Dict[str, Any] = {\n \"y\": None,\n \"z\": None,\n \"probs\": None,\n \"ids\": [],\n \"bounds\": None,\n }\n\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n assert isinstance(z, Tensor)\n assert isinstance(y, Tensor)\n\n # Extract loss.\n ls = loss.item()\n\n # Calculate the top-1 (standard) accuracy.\n top1 = torch.sum((z[:, :1] == y.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n\n # Calculate the top-5 accuracy\n top5 = torch.sum((z[:, :5] == y.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n\n # Add results to logs.\n self.logs[\"total_loss\"] += ls\n self.logs[\"total_correct\"] += top1\n self.logs[\"total_top5\"] += top5\n\n # Write results to the writer.\n self.write_metric(mode, \"loss\", loss, step_num)\n self.write_metric(mode, \"acc\", top1, step_num)\n self.write_metric(mode, \"top5\", top5, step_num)\n\n # Adds 1 to batch number (step number).\n self.logs[\"batch_num\"] += 1\n\n\nclass SSLLogger(MinervaLogger):\n \"\"\"Logger designed for self-supervised learning.\n\n Attributes:\n logs (dict[str, ~typing.Any]): Dictionary to hold these logged metrics:\n\n * ``batch_num``\n * ``total_loss``\n * ``total_correct``\n * ``total_top5``\n * ``avg_loss``\n * ``avg_output_std``\n\n collapse_level (bool): Adds calculation and logging of the :term:`collapse level` to the metrics.\n Only to be used with Siamese type models.\n euclidean (bool): Adds calculation and logging of the :term:`euclidean distance` to the metrics.\n Only to be used with Siamese type models.\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n out_shape (tuple[int, ...]): Shape of the model output.\n n_classes (int): Number of classes in dataset.\n record_int (bool): Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wand_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n \"\"\"\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n out_shape: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n record_int: bool = True,\n record_float: bool = False,\n writer: 
Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super(SSLLogger, self).__init__(\n n_batches,\n batch_size,\n n_samples,\n record_int,\n record_float=record_float,\n writer=writer,\n )\n\n self.logs: Dict[str, Any] = {\n \"batch_num\": 0,\n \"total_loss\": 0.0,\n \"total_correct\": 0.0,\n \"total_top5\": 0.0,\n \"avg_loss\": 0.0,\n \"avg_output_std\": 0.0,\n }\n\n self.collapse_level = kwargs.get(\"collapse_level\", False)\n self.euclidean = kwargs.get(\"euclidean\", False)\n\n if self.collapse_level:\n self.logs[\"collapse_level\"] = 0\n if self.euclidean:\n self.logs[\"euc_dist\"] = 0\n\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Logs the outputs and results from a step of model fitting. Overwrites abstract method.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n z (~torch.Tensor): Optional; Output tensor from the model.\n y (~torch.Tensor): Optional; Labels to assess model output against.\n bbox (~torchgeo.datasets.utils.BoundingBox): Optional; Bounding boxes of the input samples.\n \"\"\"\n assert z is not None\n\n # Adds the loss for this step to the logs.\n ls = loss.item()\n self.logs[\"total_loss\"] += ls\n\n # Compute the TOP1 and TOP5 accuracies.\n sim_argsort = utils.calc_contrastive_acc(z)\n correct = float((sim_argsort == 0).float().mean().cpu().numpy())\n top5 = float((sim_argsort < 5).float().mean().cpu().numpy())\n\n if self.euclidean:\n z_a, z_b = torch.split(z, int(0.5 * len(z)), 0)\n\n euc_dists = []\n for i in range(len(z_a)):\n euc_dists.append(\n utils.calc_norm_euc_dist(\n z_a[i].detach().cpu().numpy(), z_b[i].detach().cpu().numpy()\n )\n )\n\n euc_dist = sum(euc_dists) / len(euc_dists)\n self.write_metric(mode, \"euc_dist\", euc_dist, step_num)\n self.logs[\"euc_dist\"] += euc_dist\n\n if self.collapse_level:\n # calculate the per-dimension standard deviation of the outputs\n # we can use this later to check whether the embeddings are collapsing\n output = torch.split(z, int(0.5 * len(z)), 0)[0].detach()\n output = torch.nn.functional.normalize(output, dim=1)\n\n output_std = torch.std(output, 0) # type: ignore[attr-defined]\n output_std = output_std.mean()\n\n # use moving averages to track the loss and standard deviation\n w = 0.9\n self.logs[\"avg_loss\"] = w * self.logs[\"avg_loss\"] + (1 - w) * ls\n self.logs[\"avg_output_std\"] = (\n w * self.logs[\"avg_output_std\"] + (1 - w) * output_std.item()\n )\n\n # the level of collapse is large if the standard deviation of the l2\n # normalized output is much smaller than 1 / sqrt(dim)\n collapse_level = max(\n 0.0, 1 - math.sqrt(len(output)) * self.logs[\"avg_output_std\"]\n )\n\n self.write_metric(mode, \"collapse_level\", collapse_level, step_num)\n\n self.logs[\"collapse_level\"] = collapse_level\n\n # Add accuracies to log.\n self.logs[\"total_correct\"] += correct\n self.logs[\"total_top5\"] += top5\n\n # Writes the loss to the writer.\n self.write_metric(mode, \"loss\", ls, step_num=step_num)\n self.write_metric(mode, \"acc\", correct / 2 * len(z[0]), step_num)\n self.write_metric(mode, \"top5_acc\", top5 / 2 * len(z[0]), step_num)\n\n # Adds 1 to the batch number (step number).\n self.logs[\"batch_num\"] += 1\n" }, "sourceLanguage": "Python" }, @@ -16412,9 +16765,9 @@ "startLine": 1, 
"startColumn": 1, "charOffset": 0, - "charLength": 3215, + "charLength": 25928, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to handle the pre-training of model and its subsequent downstream task fine-tuning.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport subprocess\nimport sys\nfrom typing import Any, Dict\n\nimport yaml\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(config_path: str):\n with open(config_path) as f:\n config: Dict[str, Any] = yaml.safe_load(f)\n\n for key in config.keys():\n print(\n f\"\\nExecuting {key} experiment + =====================================================================\"\n )\n\n try:\n exit_code = subprocess.Popen( # nosec B602\n f\"python MinervaExp.py -c {config[key]}\",\n shell=True,\n ).wait()\n\n if exit_code != 0:\n raise SystemExit()\n except KeyboardInterrupt as err:\n print(f\"{err}: Skipping to next experiment...\")\n\n except SystemExit as err:\n print(err)\n print(f\"Error in {key} experiment -> ABORT\")\n sys.exit(exit_code) # type: ignore\n\n print(\n f\"\\n{key} experiment COMPLETE + =====================================================================\"\n )\n\n print(\"\\nPipeline COMPLETE\")\n\n\nif __name__ == \"__main__\":\n parser = argparse.ArgumentParser()\n parser.add_argument(\"config_path\", type=str)\n args = parser.parse_args()\n\n main(config_path=args.config_path)\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following 
conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle the logging of results from various model types.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\nfrom __future__ import annotations\n\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaLogger\",\n \"STGLogger\",\n \"SSLLogger\",\n \"KNNLogger\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nimport math\nfrom abc import ABC\nfrom typing import (\n TYPE_CHECKING,\n Any,\n Callable,\n Dict,\n Optional,\n SupportsFloat,\n Tuple,\n Union,\n)\n\nimport mlflow\nimport numpy as np\nimport torch\nfrom sklearn.metrics import jaccard_score\nfrom torch import Tensor\n\nif TYPE_CHECKING: # pragma: no cover\n from torch.utils.tensorboard.writer import SummaryWriter\n\nfrom torchgeo.datasets.utils import BoundingBox\nfrom wandb.sdk.wandb_run import Run\n\nfrom minerva.utils import utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n_tensorflow_exist = utils.check_optional_import_exist(\"tensorflow\")\nTENSORBOARD_WRITER: Optional[Callable[..., Any]]\ntry:\n TENSORBOARD_WRITER = utils._optional_import(\n \"torch.utils.tensorboard.writer\",\n name=\"SummaryWriter\",\n package=\"tensorflow\",\n )\nexcept ImportError as err: # pragma: no cover\n print(err)\n print(\"Disabling TensorBoard logging\")\n TENSORBOARD_WRITER = None\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaLogger(ABC):\n \"\"\"Base abstract class for all :mod:`minerva` logger classes to ensure intercompatibility with\n :class:`~trainer.Trainer`.\n\n Attributes:\n record_int (bool): Whether to record the integer values from an epoch of model fitting.\n record_float (bool): Whether to record the floating point values from an epoch of model fitting.\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n logs (dict[str, ~typing.Any]): 
Dictionary to hold the logs from the epoch.\n Logs should be more lightweight than ``results``.\n results (dict[str, ~typing.Any]): Dictionary to hold the results from the epoch.\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n record_int (bool): Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wandb_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n record_int: bool = True,\n record_float: bool = False,\n writer: Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super(MinervaLogger, self).__init__()\n self.record_int = record_int\n self.record_float = record_float\n self.n_batches = n_batches\n self.batch_size = batch_size\n self.n_samples = n_samples\n self.writer = writer\n\n self.logs: Dict[str, Any] = {}\n self.results: Dict[str, Any] = {}\n\n def __call__(self, mode: str, step_num: int, loss: Tensor, *args) -> None:\n \"\"\"Call :meth:`log`.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n\n Returns:\n None\n \"\"\"\n self.log(mode, step_num, loss, *args)\n\n @abc.abstractmethod\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Abstract logging method, the core functionality of a logger. 
Must be overwritten.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n z (~torch.Tensor): Optional; Output tensor from the model.\n y (~torch.Tensor): Optional; Labels to assess model output against.\n bbox (~torchgeo.datasets.utils.BoundingBox): Optional; Bounding boxes of the input samples.\n\n Returns:\n None\n \"\"\"\n pass # pragma: no cover\n\n def write_metric(\n self, mode: str, key: str, value: SupportsFloat, step_num: Optional[int] = None\n ):\n \"\"\"Write metric values to logging backends after calculation.\n\n Args:\n mode (str): Mode of model fitting.\n key (str): Key for the metric that ``value`` belongs to.\n value (SupportsFloat): Metric to write to logger.\n step_num (int): Optional; Global step number for this ``mode`` of fitting.\n\n \"\"\"\n # TODO: Are values being reduced across nodes / logged from rank 0?\n if self.writer:\n if _tensorflow_exist:\n if (\n isinstance(\n self.writer, utils.extract_class_type(TENSORBOARD_WRITER)\n )\n and self.writer\n ):\n self.writer.add_scalar( # type: ignore[attr-defined]\n tag=f\"{mode}_{key}\",\n scalar_value=value, # type: ignore[attr-defined]\n global_step=step_num,\n )\n if isinstance(self.writer, Run):\n self.writer.log({f\"{mode}/step\": step_num, f\"{mode}/{key}\": value})\n\n if mlflow.active_run():\n # If running in Azure Machine Learning, tracking URI / experiment ID set already\n # https://learn.microsoft.com/en-us/azure/machine-learning/how-to-use-mlflow-cli-runs?tabs=python%2Cmlflow#creating-a-training-routine # noqa: E501\n mlflow.log_metric(key, value) # pragma: no cover\n\n @property\n def get_logs(self) -> Dict[str, Any]:\n \"\"\"Gets the logs dictionary.\n\n Returns:\n dict[str, ~typing.Any]: Log dictionary of the logger.\n \"\"\"\n return self.logs\n\n @property\n def get_results(self) -> Dict[str, Any]:\n \"\"\"Gets the results dictionary.\n\n Returns:\n dict[str, ~typing.Any]: Results dictionary of the logger.\n \"\"\"\n return self.results\n\n\nclass STGLogger(MinervaLogger):\n \"\"\"Logger designed for supervised learning using :mod:`torchgeo` datasets.\n\n Attributes:\n logs (dict[str, ~typing.Any]): The main logs from the KNN with these metrics:\n\n * ``batch_num``\n * ``total_loss``\n * ``total_correct``\n * ``total_top5``\n\n results (dict[str, ~typing.Any]): Hold these additional, full results from the KNN:\n\n * ``y``\n * ``z``\n * ``probs``\n * ``ids``\n * ``bounds``\n\n calc_miou (bool): Activates the calculating and logging of :term:`MIoU` for segmentation models.\n Places the metric in the ``total_miou`` key of ``logs``.\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n out_shape (int | tuple[int, ...]): Shape of the model output.\n n_classes (int): Number of classes in dataset.\n record_int (bool): Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wandb_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n\n Raises:\n MemoryError: If trying to allocate memory to hold the probabilites of predictions\n from the model exceeds 
capacity.\n MemoryError: If trying to allocate memory to hold the bounding boxes of samples would exceed capacity.\n \"\"\"\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n out_shape: Union[int, Tuple[int, ...]],\n n_classes: int,\n record_int: bool = True,\n record_float: bool = False,\n writer: Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super(STGLogger, self).__init__(\n n_batches,\n batch_size,\n n_samples,\n record_int,\n record_float,\n writer,\n )\n _out_shape: Tuple[int, ...]\n\n if isinstance(out_shape, int):\n _out_shape = (out_shape,)\n else:\n _out_shape = out_shape\n\n self.logs: Dict[str, Any] = {\n \"batch_num\": 0,\n \"total_loss\": 0.0,\n \"total_correct\": 0.0,\n }\n\n self.results: Dict[str, Any] = {\n \"y\": None,\n \"z\": None,\n \"probs\": None,\n \"ids\": [],\n \"bounds\": None,\n }\n self.calc_miou = True if kwargs.get(\"model_type\") == \"segmentation\" else False\n\n if self.calc_miou:\n self.logs[\"total_miou\"] = 0.0\n\n # Allocate memory for the integer values to be recorded.\n if self.record_int:\n int_log_shape: Tuple[int, ...]\n if kwargs.get(\"model_type\") == \"scene classifier\":\n int_log_shape = (self.n_batches, self.batch_size)\n else:\n int_log_shape = (self.n_batches, self.batch_size, *_out_shape)\n\n self.results[\"z\"] = np.empty(int_log_shape, dtype=np.uint8)\n self.results[\"y\"] = np.empty(int_log_shape, dtype=np.uint8)\n\n # Allocate memory for the floating point values to be recorded.\n if self.record_float:\n float_log_shape: Tuple[int, ...]\n if kwargs.get(\"model_type\") == \"scene classifier\":\n float_log_shape = (self.n_batches, self.batch_size, n_classes)\n else:\n float_log_shape = (\n self.n_batches,\n self.batch_size,\n n_classes,\n *_out_shape,\n )\n\n try:\n self.results[\"probs\"] = np.empty(float_log_shape, dtype=np.float16)\n except MemoryError: # pragma: no cover\n raise MemoryError(\n \"Dataset too large to record probabilities of predicted classes!\"\n )\n\n try:\n self.results[\"bounds\"] = np.empty(\n (self.n_batches, self.batch_size), dtype=object\n )\n except MemoryError: # pragma: no cover\n raise MemoryError(\n \"Dataset too large to record bounding boxes of samples!\"\n )\n\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Logs the outputs and results from a step of model fitting. 
Overwrites abstract method.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n z (~torch.Tensor): Output tensor from the model.\n y (~torch.Tensor): Labels to assess model output against.\n bbox (~torchgeo.datasets.utils.BoundingBox): Bounding boxes of the input samples.\n\n Returns:\n None\n \"\"\"\n\n assert z is not None\n assert y is not None\n\n if self.record_int:\n # Arg max the estimated probabilities and add to predictions.\n self.results[\"z\"][self.logs[\"batch_num\"]] = torch.argmax(z, 1).cpu().numpy() # type: ignore[attr-defined]\n\n # Add the labels and sample IDs to lists.\n self.results[\"y\"][self.logs[\"batch_num\"]] = y.cpu().numpy()\n batch_ids = []\n for i in range(\n self.logs[\"batch_num\"] * self.batch_size,\n (self.logs[\"batch_num\"] + 1) * self.batch_size,\n ):\n batch_ids.append(str(i).zfill(len(str(self.n_samples))))\n self.results[\"ids\"].append(batch_ids)\n\n if self.record_float:\n assert bbox is not None\n # Add the estimated probabilities to probs.\n self.results[\"probs\"][self.logs[\"batch_num\"]] = z.detach().cpu().numpy()\n self.results[\"bounds\"][self.logs[\"batch_num\"]] = bbox\n\n # Computes the loss and the correct predictions from this step.\n ls = loss.item()\n correct = (torch.argmax(z, 1) == y).sum().item() # type: ignore[attr-defined]\n\n # Adds loss and correct predictions to logs.\n self.logs[\"total_loss\"] += ls\n self.logs[\"total_correct\"] += correct\n\n if self.calc_miou:\n assert y is not None\n y_true = y.detach().cpu().numpy()\n y_pred = torch.argmax(z, 1).detach().cpu().numpy() # type: ignore[attr-defined]\n miou = 0.0\n for i in range(len(y)):\n miou += float(\n jaccard_score(\n y_true[i].flatten(), y_pred[i].flatten(), average=\"macro\"\n )\n ) # noqa: E501 type: ignore[attr-defined]\n self.logs[\"total_miou\"] += miou\n\n self.write_metric(mode, \"miou\", miou / len(y), step_num=step_num)\n\n # Writes loss and correct predictions to the writer.\n self.write_metric(mode, \"loss\", ls, step_num=step_num)\n self.write_metric(\n mode, \"acc\", correct / len(torch.flatten(y)), step_num=step_num\n )\n\n # Adds 1 to batch number (step number).\n self.logs[\"batch_num\"] += 1\n\n\nclass KNNLogger(MinervaLogger):\n \"\"\"Logger specifically designed for use with the KNN validation in\n :meth:`trainer.Trainer.weighted_knn_validation`.\n\n Attributes:\n logs (dict[str, ~typing.Any]): The main logs from the KNN with these metrics:\n\n * ``batch_num``\n * ``total_loss``\n * ``total_correct``\n * ``total_top5``\n\n results (dict[str, ~typing.Any]): Hold these additional, full results from the KNN:\n\n * ``y``\n * ``z``\n * ``probs``\n * ``ids``\n * ``bounds``\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n record_int (bool): Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wand_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n \"\"\"\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n record_int: bool = True,\n record_float: bool = 
False,\n writer: Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super().__init__(\n n_batches, batch_size, n_samples, record_int, record_float, writer, **kwargs\n )\n\n self.logs: Dict[str, Any] = {\n \"batch_num\": 0,\n \"total_loss\": 0.0,\n \"total_correct\": 0.0,\n \"total_top5\": 0.0,\n }\n\n self.results: Dict[str, Any] = {\n \"y\": None,\n \"z\": None,\n \"probs\": None,\n \"ids\": [],\n \"bounds\": None,\n }\n\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n assert isinstance(z, Tensor)\n assert isinstance(y, Tensor)\n\n # Extract loss.\n ls = loss.item()\n\n # Calculate the top-1 (standard) accuracy.\n top1 = torch.sum((z[:, :1] == y.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n\n # Calculate the top-5 accuracy\n top5 = torch.sum((z[:, :5] == y.unsqueeze(dim=-1)).any(dim=-1).float()).item()\n\n # Add results to logs.\n self.logs[\"total_loss\"] += ls\n self.logs[\"total_correct\"] += top1\n self.logs[\"total_top5\"] += top5\n\n # Write results to the writer.\n self.write_metric(mode, \"loss\", loss, step_num)\n self.write_metric(mode, \"acc\", top1, step_num)\n self.write_metric(mode, \"top5\", top5, step_num)\n\n # Adds 1 to batch number (step number).\n self.logs[\"batch_num\"] += 1\n\n\nclass SSLLogger(MinervaLogger):\n \"\"\"Logger designed for self-supervised learning.\n\n Attributes:\n logs (dict[str, ~typing.Any]): Dictionary to hold these logged metrics:\n\n * ``batch_num``\n * ``total_loss``\n * ``total_correct``\n * ``total_top5``\n * ``avg_loss``\n * ``avg_output_std``\n\n collapse_level (bool): Adds calculation and logging of the :term:`collapse level` to the metrics.\n Only to be used with Siamese type models.\n euclidean (bool): Adds calculation and logging of the :term:`euclidean distance` to the metrics.\n Only to be used with Siamese type models.\n\n Args:\n n_batches (int): Number of batches in the epoch.\n batch_size (int): Size of the batch.\n n_samples (int): Total number of samples in the epoch.\n out_shape (tuple[int, ...]): Shape of the model output.\n n_classes (int): Number of classes in dataset.\n record_int (bool): Optional; Whether to record the integer values from an epoch of model fitting.\n Defaults to ``True``.\n record_float (bool): Optional; Whether to record the floating point values from an epoch of model fitting.\n Defaults to ``False``.\n writer (~torch.utils.tensorboard.writer.SummaryWriter | ~wandb.sdk.wand_run.Run): Optional; Writer object\n from :mod:`tensorboard`, a :mod:`wandb` :class:`~wandb.sdk.wandb_run.Run` object or ``None``.\n \"\"\"\n\n def __init__(\n self,\n n_batches: int,\n batch_size: int,\n n_samples: int,\n out_shape: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n record_int: bool = True,\n record_float: bool = False,\n writer: Optional[Union[SummaryWriter, Run]] = None,\n **kwargs,\n ) -> None:\n super(SSLLogger, self).__init__(\n n_batches,\n batch_size,\n n_samples,\n record_int,\n record_float=record_float,\n writer=writer,\n )\n\n self.logs: Dict[str, Any] = {\n \"batch_num\": 0,\n \"total_loss\": 0.0,\n \"total_correct\": 0.0,\n \"total_top5\": 0.0,\n \"avg_loss\": 0.0,\n \"avg_output_std\": 0.0,\n }\n\n self.collapse_level = kwargs.get(\"collapse_level\", False)\n self.euclidean = kwargs.get(\"euclidean\", False)\n\n if self.collapse_level:\n self.logs[\"collapse_level\"] = 0\n if self.euclidean:\n 
self.logs[\"euc_dist\"] = 0\n\n def log(\n self,\n mode: str,\n step_num: int,\n loss: Tensor,\n z: Optional[Tensor] = None,\n y: Optional[Tensor] = None,\n bbox: Optional[BoundingBox] = None,\n *args,\n **kwargs,\n ) -> None:\n \"\"\"Logs the outputs and results from a step of model fitting. Overwrites abstract method.\n\n Args:\n mode (str): Mode of model fitting.\n step_num (int): The global step number of for the mode of model fitting.\n loss (~torch.Tensor): Loss from this step of model fitting.\n z (~torch.Tensor): Optional; Output tensor from the model.\n y (~torch.Tensor): Optional; Labels to assess model output against.\n bbox (~torchgeo.datasets.utils.BoundingBox): Optional; Bounding boxes of the input samples.\n \"\"\"\n assert z is not None\n\n # Adds the loss for this step to the logs.\n ls = loss.item()\n self.logs[\"total_loss\"] += ls\n\n # Compute the TOP1 and TOP5 accuracies.\n sim_argsort = utils.calc_contrastive_acc(z)\n correct = float((sim_argsort == 0).float().mean().cpu().numpy())\n top5 = float((sim_argsort < 5).float().mean().cpu().numpy())\n\n if self.euclidean:\n z_a, z_b = torch.split(z, int(0.5 * len(z)), 0)\n\n euc_dists = []\n for i in range(len(z_a)):\n euc_dists.append(\n utils.calc_norm_euc_dist(\n z_a[i].detach().cpu().numpy(), z_b[i].detach().cpu().numpy()\n )\n )\n\n euc_dist = sum(euc_dists) / len(euc_dists)\n self.write_metric(mode, \"euc_dist\", euc_dist, step_num)\n self.logs[\"euc_dist\"] += euc_dist\n\n if self.collapse_level:\n # calculate the per-dimension standard deviation of the outputs\n # we can use this later to check whether the embeddings are collapsing\n output = torch.split(z, int(0.5 * len(z)), 0)[0].detach()\n output = torch.nn.functional.normalize(output, dim=1)\n\n output_std = torch.std(output, 0) # type: ignore[attr-defined]\n output_std = output_std.mean()\n\n # use moving averages to track the loss and standard deviation\n w = 0.9\n self.logs[\"avg_loss\"] = w * self.logs[\"avg_loss\"] + (1 - w) * ls\n self.logs[\"avg_output_std\"] = (\n w * self.logs[\"avg_output_std\"] + (1 - w) * output_std.item()\n )\n\n # the level of collapse is large if the standard deviation of the l2\n # normalized output is much smaller than 1 / sqrt(dim)\n collapse_level = max(\n 0.0, 1 - math.sqrt(len(output)) * self.logs[\"avg_output_std\"]\n )\n\n self.write_metric(mode, \"collapse_level\", collapse_level, step_num)\n\n self.logs[\"collapse_level\"] = collapse_level\n\n # Add accuracies to log.\n self.logs[\"total_correct\"] += correct\n self.logs[\"total_top5\"] += top5\n\n # Writes the loss to the writer.\n self.write_metric(mode, \"loss\", ls, step_num=step_num)\n self.write_metric(mode, \"acc\", correct / 2 * len(z[0]), step_num)\n self.write_metric(mode, \"top5_acc\", top5 / 2 * len(z[0]), step_num)\n\n # Adds 1 to the batch number (step number).\n self.logs[\"batch_num\"] += 1\n" } } }, @@ -16427,7 +16780,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "5759e3466baa71903baffed4c3f49cd277d5e701b966c2512c50d4c6f41af28f" + "equalIndicator/v1": "1f86a3134eeae9e96cec7fdfeaeee620606d810c0f77fb2b5de045d6b6f9803b" }, "properties": { "ideaSeverity": "ERROR" @@ -16445,16 +16798,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/transforms.py", + "uri": "minerva/utils/config_load.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 14296, + "charLength": 8690, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free 
software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module containing custom transforms to be used with :mod:`torchvision.transforms`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"ClassTransform\",\n \"PairCreate\",\n \"Normalise\",\n \"DetachedColorJitter\",\n \"SingleLabel\",\n \"ToRGB\",\n \"MinervaCompose\",\n \"SwapKeys\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, overload\n\nimport torch\nfrom torch import LongTensor, Tensor\nfrom torchvision.transforms import ColorJitter\nfrom torchvision.transforms import functional_tensor as ft\n\nfrom minerva.utils.utils import find_tensor_mode, mask_transform\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass ClassTransform:\n \"\"\"Transform to be applied to a mask to convert from one labelling schema to another.\n\n Attributes:\n transform (dict[int, int]): Mapping from one labelling schema to another.\n\n Args:\n transform (dict[int, int]): Mapping from one labelling schema to another.\n \"\"\"\n\n def __init__(self, transform: Dict[int, int]) -> None:\n self.transform = transform\n\n def __call__(self, mask: LongTensor) -> LongTensor:\n return self.forward(mask)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(transform={self.transform})\"\n\n def forward(self, mask: LongTensor) -> LongTensor:\n \"\"\"Transforms the given mask from the original label schema to the new.\n\n Args:\n mask (~torch.LongTensor): Mask in the original label schema.\n\n Returns:\n ~torch.LongTensor: Mask transformed into new label schema.\n \"\"\"\n transformed: LongTensor = mask_transform(mask, self.transform)\n return transformed\n\n\nclass PairCreate:\n \"\"\"Transform that takes a sample and returns a pair of the same sample.\"\"\"\n\n def __init__(self) -> None:\n pass\n\n def __call__(self, sample: Any) -> Tuple[Any, Any]:\n return self.forward(sample)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}()\"\n\n @staticmethod\n def forward(sample: Any) -> Tuple[Any, Any]:\n \"\"\"Takes 
a sample and returns it and a copy as a :class:`tuple` pair.\n\n Args:\n sample (~typing.Any): Sample to duplicate.\n\n Returns:\n tuple[~typing.Any, ~typing.Any]: :class:`tuple` of two copies of the sample.\n \"\"\"\n return sample, sample\n\n\nclass Normalise:\n \"\"\"Transform that normalises an image tensor based on the bit size.\n\n Attributes:\n norm_value (int): Value to normalise image with.\n\n Args:\n norm_value (int): Value to normalise image with.\n \"\"\"\n\n def __init__(self, norm_value: int) -> None:\n self.norm_value = norm_value\n\n def __call__(self, img: Tensor) -> Tensor:\n return self.forward(img)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(norm_value={self.norm_value})\"\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"Normalises inputted image using ``norm_value``.\n\n Args:\n img (~torch.Tensor): Image tensor to be normalised. Should have a bit size\n that relates to ``norm_value``.\n\n Returns:\n ~torch.Tensor: Input image tensor normalised by ``norm_value``.\n \"\"\"\n return img / self.norm_value\n\n\nclass DetachedColorJitter(ColorJitter):\n \"\"\"Sends RGB channels of multi-spectral images to be transformed by\n :class:`~torchvision.transforms.ColorJitter`.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"Detaches RGB channels of input image to be sent to :class:`~torchvision.transforms.ColorJitter`.\n\n All other channels bypass :class:`~torchvision.transforms.ColorJitter` and are\n concatenated onto the colour jittered RGB channels.\n\n Args:\n img (~torch.Tensor): Input image.\n\n Raises:\n ValueError: If number of channels of input ``img`` is 2.\n\n Returns:\n ~torch.Tensor: Color jittered image.\n \"\"\"\n channels = ft.get_image_num_channels(img)\n\n jitter_img: Tensor\n if channels > 3:\n rgb_jitter = super().forward(img[:3])\n jitter_img = torch.cat((rgb_jitter, img[3:]), 0) # type: ignore[attr-defined]\n\n elif channels in (1, 3):\n jitter_img = super().forward(img)\n\n else:\n raise ValueError(f\"{channels} channel images are not supported!\")\n\n return jitter_img\n\n def __call__(self, img: Tensor) -> Tensor:\n return self.forward(img)\n\n def __repr__(self) -> Any:\n return super().__repr__()\n\n\nclass ToRGB:\n \"\"\"Reduces the number of channels down to RGB.\n\n Attributes:\n channels (tuple[int, int, int]): Optional; Tuple defining which channels in expected input images\n contain the RGB bands. If ``None``, it is assumed that the RGB bands are in the first 3 channels.\n\n Args:\n channels (tuple[int, int, int]): Optional; Tuple defining which channels in expected input images\n contain the RGB bands. If ``None``, it is assumed that the RGB bands are in the first 3 channels.\n\n .. 
versionadded:: 0.22\n\n \"\"\"\n\n def __init__(self, channels: Optional[Tuple[int, int, int]] = None) -> None:\n self.channels = channels\n\n def __call__(self, img: Tensor) -> Tensor:\n return self.forward(img)\n\n def __repr__(self) -> str:\n if self.channels:\n return f\"{self.__class__.__name__}(channels --> [{self.channels}])\"\n else:\n return f\"{self.__class__.__name__}(channels --> [0:3])\"\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the transform, returning an RGB image.\n\n Args:\n img (~torch.Tensor): Image to convert to RGB.\n\n Returns:\n ~torch.Tensor: Image of only the RGB channels of ``img``.\n\n Raises:\n ValueError: If ``img`` has less channels than specified in :attr:`~ToRGB.channels`.\n ValueError: If ``img`` has less than 3 channels and :attr:`~ToRGB.channels` is ``None``.\n \"\"\"\n # If a tuple defining the RGB channels was provided, select and concat together.\n if self.channels:\n if len(img) < len(self.channels):\n raise ValueError(\"Image has less channels that trying to reduce to!\")\n\n return torch.stack([img[channel] for channel in self.channels])\n\n # If no channels were provided, assume that that the first 3 channels are the RGB channels.\n else:\n if len(img) < 3:\n raise ValueError(\"Image has less than 3 channels! Cannot be RGB!\")\n\n return img[:3]\n\n\nclass SingleLabel:\n \"\"\"Reduces a mask to a single label using transform mode provided.\n\n Attributes:\n mode (str): Mode of operation.\n\n Args:\n mode (str): Mode of operation. Currently only supports ``\"modal\"``.\n\n .. versionadded:: 0.22\n\n \"\"\"\n\n def __init__(self, mode: str = \"modal\") -> None:\n self.mode = mode\n\n def __call__(self, mask: LongTensor) -> LongTensor:\n return self.forward(mask)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(mode={self.mode})\"\n\n def forward(self, mask: LongTensor) -> LongTensor:\n \"\"\"Forward pass of the transform, reducing the input mask to a single label.\n\n Args:\n mask (~torch.LongTensor): Input mask to reduce to a single label.\n\n Raises:\n NotImplementedError: If :attr:`~SingleLabel.mode` is not ``\"modal\"``.\n\n Returns:\n ~torch.LongTensor: The single label as a 0D, 1-element tensor.\n \"\"\"\n if self.mode == \"modal\":\n return LongTensor([find_tensor_mode(mask)])\n else:\n raise NotImplementedError(\n f\"{self.mode} is not a recognised operating mode!\"\n )\n\n\nclass MinervaCompose:\n \"\"\"Extension of :class:`torchvision.transforms.Compose`. Composes several transforms together.\n\n Designed to work with both :class:`~torch.Tensor` and :mod:`torchgeo` sample :class:`dict`.\n\n This transform does not support torchscript. Please, see the note below.\n\n Args:\n transforms (~typing.Sequence[~typing.Callable[..., ~typing.Any]] | ~typing.Callable[..., ~typing.Any]):\n List of transforms to compose.\n key (str): Optional; For use with :mod:`torchgeo` samples and must be assigned a value if using.\n The key of the data type in the sample dict to transform.\n\n Example:\n >>> transforms.MinervaCompose([\n >>> transforms.CenterCrop(10),\n >>> transforms.PILToTensor(),\n >>> transforms.ConvertImageDtype(torch.float),\n >>> ])\n\n .. 
note::\n In order to script the transformations, please use :class:`torch.nn.Sequential` as below.\n\n >>> transforms = torch.nn.Sequential(\n >>> transforms.CenterCrop(10),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> )\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. that work with :class:`torch.Tensor`,\n does not require ``lambda`` functions or :class:`pillow.Image`.\n\n \"\"\"\n\n def __init__(\n self,\n transforms: Union[Sequence[Callable[..., Any]], Callable[..., Any]],\n key: Optional[str] = None,\n ) -> None:\n self.transforms = transforms\n self.key = key\n\n @overload\n def __call__(self, sample: Tensor) -> Tensor:\n ... # pragma: no cover\n\n @overload\n def __call__(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n ... # pragma: no cover\n\n def __call__(\n self, sample: Union[Tensor, Dict[str, Any]]\n ) -> Union[Tensor, Dict[str, Any]]:\n if isinstance(sample, Tensor):\n return self._transform_input(sample)\n elif isinstance(sample, dict):\n assert self.key is not None\n sample[self.key] = self._transform_input(sample[self.key])\n return sample\n else:\n raise TypeError(f\"Sample is {type(sample)=}, not Tensor or dict!\")\n\n def _transform_input(self, img: Tensor) -> Tensor:\n if isinstance(self.transforms, Sequence):\n for t in self.transforms:\n img = t(img)\n elif callable(self.transforms):\n img = self.transforms(img)\n\n else:\n raise TypeError(\n f\"`transforms` has type {type(self.transforms)}, not callable\"\n )\n\n return img\n\n def __repr__(self) -> str:\n format_string = self.__class__.__name__ + \"(\"\n\n if isinstance(self.transforms, Sequence):\n for t in self.transforms:\n format_string += \"\\n\"\n format_string += \" {0}\".format(t)\n\n elif callable(self.transforms):\n format_string += \"{0})\".format(self.transforms)\n return format_string\n\n else:\n raise TypeError(\n f\"`transforms` has type {type(self.transforms)}, not callable\"\n )\n\n format_string += \"\\n)\"\n\n return format_string\n\n\nclass SwapKeys:\n \"\"\"Transform to set one key in a :mod:`torchgeo` sample :class:`dict` to another.\n\n Useful for testing autoencoders to predict their input.\n\n Attributes:\n from_key (str): Key for the value to set to ``to_key``.\n to_key (str): Key to set the value from ``from_key`` to.\n\n Args:\n from_key (str): Key for the value to set to ``to_key``.\n to_key (str): Key to set the value from ``from_key`` to.\n\n .. 
versionadded:: 0.22\n \"\"\"\n\n def __init__(self, from_key: str, to_key: str) -> None:\n self.from_key = from_key\n self.to_key = to_key\n\n def __call__(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n return self.forward(sample)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.from_key} -> {self.to_key})\"\n\n def forward(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Sets the ``to_key`` of ``sample`` to the ``from_key`` and returns.\n\n Args:\n sample (dict[str, ~typing.Any]): Sample dict from :mod:`torchgeo` containing ``from_key``.\n\n Returns:\n dict[str, ~typing.Any]: Sample with ``to_key`` set to the value of ``from_key``.\n \"\"\"\n sample[self.to_key] = sample[self.from_key]\n return sample\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Handles the loading of config files and checking paths.\n\nAttributes:\n DEFAULT_CONF_DIR_PATH (~pathlib.Path): Path to the default config directory.\n DEFAULT_CONFIG_NAME (str): Name of the default, example config.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"DEFAULT_CONF_DIR_PATH\",\n \"DEFAULT_CONFIG_NAME\",\n \"ToDefaultConfDir\",\n \"universal_path\",\n \"check_paths\",\n \"chdir_to_default\",\n \"load_configs\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nimport yaml\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Default values for the path to the config directory and config name.\nDEFAULT_CONF_DIR_PATH = 
Path(\"../../inbuilt_cfgs/\")\nDEFAULT_CONFIG_NAME: str = \"example_config.yml\"\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass ToDefaultConfDir:\n \"\"\"Changes to the default config directory. Switches back to the previous CWD on close.\"\"\"\n\n def __init__(self) -> None:\n self._cwd = os.getcwd()\n self._def_dir = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n\n def __enter__(self) -> None:\n os.chdir(self._def_dir)\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.chdir(self._cwd)\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef universal_path(path: Any) -> Path:\n \"\"\"Creates a :class:`~pathlib.Path` object from :class:`str` or :class:`~typing.Iterable` inputs.\n\n Args:\n path (~typing.Any): Representation of a path to convert to :class:`~pathlib.Path` object.\n\n Returns:\n ~pathlib.Path: :class:`~pathlib.Path` object of the input ``path``.\n \"\"\"\n if isinstance(path, Path):\n return path\n elif type(path) == str:\n return Path(path)\n else:\n return Path(*path)\n\n\ndef check_paths(\n config: Optional[Union[str, Path]] = None, use_default_conf_dir: bool = False\n) -> Tuple[str, Optional[str], Optional[Path]]:\n \"\"\"Checks the path given for the config.\n\n Args:\n config (str | ~pathlib.Path | None): Path to the config given from the CLI.\n use_default_conf_dir (bool): Assumes that ``config`` is in the default config directory if ``True``.\n\n Returns:\n tuple[str, ~typing.Optional[str], ~typing.Optional[~pathlib.Path]]: Tuple of the path for\n :func:`load_configs` to use, the config name and path to config.\n \"\"\"\n\n config_name: Optional[str] = None\n config_path: Optional[Path] = None\n\n if config is not None:\n p = Path(config)\n head = p.parent\n tail = p.name\n\n if str(head) != \"\" or str(head) is not None:\n config_path = head\n elif str(head) == \"\" or head is None:\n config_path = Path(\"\")\n\n config_name = tail\n\n # Overwrites the config path if option found in args regardless of -c args.\n if use_default_conf_dir:\n if config_path is not None:\n print(\n \"Warning: Config path specified with `--default_config_dir` option.\"\n + \"\\nDefault config directory path will be used.\"\n )\n config_path = None\n\n # If no config_path, change directory to the default config directory.\n if config_path is None:\n config_name = chdir_to_default(config_name)\n\n # Check the config specified exists at the path given. If not, assume its in the default directory.\n else:\n if config_name is None or not (config_path / config_name).exists():\n config_name = chdir_to_default(config_name)\n else:\n pass\n\n path = config_name\n if config_path is not None and config_path != Path(\"\"):\n path = str(config_path / config_name)\n\n return path, config_name, config_path\n\n\ndef chdir_to_default(config_name: Optional[str] = None) -> str:\n \"\"\"Changes the current working directory to the default config directory.\n\n Args:\n config_name (str): Optional; Name of the config in the default directory. Defaults to None.\n\n Returns:\n str: :data:`DEFAULT_CONFIG_NAME` if ``config_name`` not in default directory. 
``config_name`` if it does exist.\n \"\"\"\n\n this_abs_path = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n os.chdir(this_abs_path)\n\n if config_name is None or not Path(config_name).exists():\n return DEFAULT_CONFIG_NAME\n else:\n return config_name\n\n\ndef load_configs(master_config_path: Union[str, Path]) -> Tuple[Dict[str, Any], ...]:\n \"\"\"Loads the master config from ``YAML``. Finds other config paths within and loads them.\n\n Args:\n master_config_path (str): Path to the master config ``YAML`` file.\n\n Returns:\n tuple[dict[str, ~typing.Any], ...]: Master config and any other configs found from paths in the master config.\n \"\"\"\n\n def yaml_load(path: Union[str, Path]) -> Any:\n \"\"\"Loads ``YAML`` file from path as dict.\n Args:\n path(str | ~pathlib.Path): Path to ``YAML`` file.\n\n Returns:\n yml_file (dict): YAML file loaded as dict.\n \"\"\"\n with open(path) as f:\n return yaml.safe_load(f)\n\n def aux_config_load(paths: Dict[str, str]) -> Dict[str, Dict[str, Any]]:\n \"\"\"Loads and returns config files from YAML as dicts.\n\n Args:\n paths (dict[str, str]): Dictionary mapping config names to paths to their ``YAML`` files.\n\n Returns:\n dict[str, dict[str, ~typing.Any]]: Config dictionaries loaded from ``YAML`` from paths.\n \"\"\"\n configs = {}\n for _config_name in paths.keys():\n # Loads config from YAML as dict.\n configs[_config_name] = yaml_load(paths[_config_name])\n return configs\n\n # First loads the master config.\n master_config = yaml_load(master_config_path)\n\n # Gets the paths for the other configs from master config.\n config_paths = master_config[\"dir\"][\"configs\"]\n\n # Loads and returns the other configs along with master config.\n return master_config, aux_config_load(config_paths)\n" }, "sourceLanguage": "Python" }, @@ -16462,9 +16815,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 14296, + "charLength": 8690, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module containing custom transforms to be used with :mod:`torchvision.transforms`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"ClassTransform\",\n \"PairCreate\",\n \"Normalise\",\n \"DetachedColorJitter\",\n \"SingleLabel\",\n \"ToRGB\",\n \"MinervaCompose\",\n \"SwapKeys\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Callable, Dict, Optional, Sequence, Tuple, Union, overload\n\nimport torch\nfrom torch import LongTensor, Tensor\nfrom torchvision.transforms import ColorJitter\nfrom torchvision.transforms import functional_tensor as ft\n\nfrom minerva.utils.utils import find_tensor_mode, mask_transform\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass ClassTransform:\n \"\"\"Transform to be applied to a mask to convert from one labelling schema to another.\n\n Attributes:\n transform (dict[int, int]): Mapping from one labelling schema to another.\n\n Args:\n transform (dict[int, int]): Mapping from one labelling schema to another.\n \"\"\"\n\n def __init__(self, transform: Dict[int, int]) -> None:\n self.transform = transform\n\n def __call__(self, mask: LongTensor) -> LongTensor:\n return self.forward(mask)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(transform={self.transform})\"\n\n def forward(self, mask: LongTensor) -> LongTensor:\n \"\"\"Transforms the given mask from the original label schema to the new.\n\n Args:\n mask (~torch.LongTensor): Mask in the original label schema.\n\n Returns:\n ~torch.LongTensor: Mask transformed into new label schema.\n \"\"\"\n transformed: LongTensor = mask_transform(mask, self.transform)\n return transformed\n\n\nclass PairCreate:\n \"\"\"Transform that takes a sample and returns a pair of the same sample.\"\"\"\n\n def __init__(self) -> None:\n pass\n\n def __call__(self, sample: Any) -> Tuple[Any, Any]:\n return self.forward(sample)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}()\"\n\n @staticmethod\n def forward(sample: Any) -> Tuple[Any, Any]:\n \"\"\"Takes a sample and returns it and a copy as a :class:`tuple` pair.\n\n Args:\n sample (~typing.Any): Sample to duplicate.\n\n Returns:\n tuple[~typing.Any, ~typing.Any]: :class:`tuple` of two copies of the sample.\n \"\"\"\n return sample, sample\n\n\nclass Normalise:\n \"\"\"Transform that normalises an image tensor based on the bit size.\n\n Attributes:\n norm_value (int): Value to normalise image with.\n\n Args:\n norm_value (int): Value to normalise image with.\n \"\"\"\n\n def __init__(self, norm_value: int) -> None:\n self.norm_value = norm_value\n\n def __call__(self, img: Tensor) -> Tensor:\n return 
self.forward(img)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(norm_value={self.norm_value})\"\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"Normalises the input image using ``norm_value``.\n\n Args:\n img (~torch.Tensor): Image tensor to be normalised. Should have a bit size\n that relates to ``norm_value``.\n\n Returns:\n ~torch.Tensor: Input image tensor normalised by ``norm_value``.\n \"\"\"\n return img / self.norm_value\n\n\nclass DetachedColorJitter(ColorJitter):\n \"\"\"Sends RGB channels of multi-spectral images to be transformed by\n :class:`~torchvision.transforms.ColorJitter`.\n \"\"\"\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"Detaches RGB channels of input image to be sent to :class:`~torchvision.transforms.ColorJitter`.\n\n All other channels bypass :class:`~torchvision.transforms.ColorJitter` and are\n concatenated onto the colour jittered RGB channels.\n\n Args:\n img (~torch.Tensor): Input image.\n\n Raises:\n ValueError: If number of channels of input ``img`` is 2.\n\n Returns:\n ~torch.Tensor: Color jittered image.\n \"\"\"\n channels = ft.get_image_num_channels(img)\n\n jitter_img: Tensor\n if channels > 3:\n rgb_jitter = super().forward(img[:3])\n jitter_img = torch.cat((rgb_jitter, img[3:]), 0) # type: ignore[attr-defined]\n\n elif channels in (1, 3):\n jitter_img = super().forward(img)\n\n else:\n raise ValueError(f\"{channels} channel images are not supported!\")\n\n return jitter_img\n\n def __call__(self, img: Tensor) -> Tensor:\n return self.forward(img)\n\n def __repr__(self) -> Any:\n return super().__repr__()\n\n\nclass ToRGB:\n \"\"\"Reduces the number of channels down to RGB.\n\n Attributes:\n channels (tuple[int, int, int]): Optional; Tuple defining which channels in expected input images\n contain the RGB bands. If ``None``, it is assumed that the RGB bands are in the first 3 channels.\n\n Args:\n channels (tuple[int, int, int]): Optional; Tuple defining which channels in expected input images\n contain the RGB bands. If ``None``, it is assumed that the RGB bands are in the first 3 channels.\n\n .. versionadded:: 0.22\n\n \"\"\"\n\n def __init__(self, channels: Optional[Tuple[int, int, int]] = None) -> None:\n self.channels = channels\n\n def __call__(self, img: Tensor) -> Tensor:\n return self.forward(img)\n\n def __repr__(self) -> str:\n if self.channels:\n return f\"{self.__class__.__name__}(channels --> [{self.channels}])\"\n else:\n return f\"{self.__class__.__name__}(channels --> [0:3])\"\n\n def forward(self, img: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the transform, returning an RGB image.\n\n Args:\n img (~torch.Tensor): Image to convert to RGB.\n\n Returns:\n ~torch.Tensor: Image of only the RGB channels of ``img``.\n\n Raises:\n ValueError: If ``img`` has fewer channels than specified in :attr:`~ToRGB.channels`.\n ValueError: If ``img`` has fewer than 3 channels and :attr:`~ToRGB.channels` is ``None``.\n \"\"\"\n # If a tuple defining the RGB channels was provided, select and concat together.\n if self.channels:\n if len(img) < len(self.channels):\n raise ValueError(\"Image has fewer channels than the number to reduce to!\")\n\n return torch.stack([img[channel] for channel in self.channels])\n\n # If no channels were provided, assume that the first 3 channels are the RGB channels.\n else:\n if len(img) < 3:\n raise ValueError(\"Image has fewer than 3 channels!
Cannot be RGB!\")\n\n return img[:3]\n\n\nclass SingleLabel:\n \"\"\"Reduces a mask to a single label using transform mode provided.\n\n Attributes:\n mode (str): Mode of operation.\n\n Args:\n mode (str): Mode of operation. Currently only supports ``\"modal\"``.\n\n .. versionadded:: 0.22\n\n \"\"\"\n\n def __init__(self, mode: str = \"modal\") -> None:\n self.mode = mode\n\n def __call__(self, mask: LongTensor) -> LongTensor:\n return self.forward(mask)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}(mode={self.mode})\"\n\n def forward(self, mask: LongTensor) -> LongTensor:\n \"\"\"Forward pass of the transform, reducing the input mask to a single label.\n\n Args:\n mask (~torch.LongTensor): Input mask to reduce to a single label.\n\n Raises:\n NotImplementedError: If :attr:`~SingleLabel.mode` is not ``\"modal\"``.\n\n Returns:\n ~torch.LongTensor: The single label as a 0D, 1-element tensor.\n \"\"\"\n if self.mode == \"modal\":\n return LongTensor([find_tensor_mode(mask)])\n else:\n raise NotImplementedError(\n f\"{self.mode} is not a recognised operating mode!\"\n )\n\n\nclass MinervaCompose:\n \"\"\"Extension of :class:`torchvision.transforms.Compose`. Composes several transforms together.\n\n Designed to work with both :class:`~torch.Tensor` and :mod:`torchgeo` sample :class:`dict`.\n\n This transform does not support torchscript. Please, see the note below.\n\n Args:\n transforms (~typing.Sequence[~typing.Callable[..., ~typing.Any]] | ~typing.Callable[..., ~typing.Any]):\n List of transforms to compose.\n key (str): Optional; For use with :mod:`torchgeo` samples and must be assigned a value if using.\n The key of the data type in the sample dict to transform.\n\n Example:\n >>> transforms.MinervaCompose([\n >>> transforms.CenterCrop(10),\n >>> transforms.PILToTensor(),\n >>> transforms.ConvertImageDtype(torch.float),\n >>> ])\n\n .. note::\n In order to script the transformations, please use :class:`torch.nn.Sequential` as below.\n\n >>> transforms = torch.nn.Sequential(\n >>> transforms.CenterCrop(10),\n >>> transforms.Normalize((0.485, 0.456, 0.406), (0.229, 0.224, 0.225)),\n >>> )\n >>> scripted_transforms = torch.jit.script(transforms)\n\n Make sure to use only scriptable transformations, i.e. that work with :class:`torch.Tensor`,\n does not require ``lambda`` functions or :class:`pillow.Image`.\n\n \"\"\"\n\n def __init__(\n self,\n transforms: Union[Sequence[Callable[..., Any]], Callable[..., Any]],\n key: Optional[str] = None,\n ) -> None:\n self.transforms = transforms\n self.key = key\n\n @overload\n def __call__(self, sample: Tensor) -> Tensor:\n ... # pragma: no cover\n\n @overload\n def __call__(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n ... 
# pragma: no cover\n\n def __call__(\n self, sample: Union[Tensor, Dict[str, Any]]\n ) -> Union[Tensor, Dict[str, Any]]:\n if isinstance(sample, Tensor):\n return self._transform_input(sample)\n elif isinstance(sample, dict):\n assert self.key is not None\n sample[self.key] = self._transform_input(sample[self.key])\n return sample\n else:\n raise TypeError(f\"Sample is {type(sample)=}, not Tensor or dict!\")\n\n def _transform_input(self, img: Tensor) -> Tensor:\n if isinstance(self.transforms, Sequence):\n for t in self.transforms:\n img = t(img)\n elif callable(self.transforms):\n img = self.transforms(img)\n\n else:\n raise TypeError(\n f\"`transforms` has type {type(self.transforms)}, not callable\"\n )\n\n return img\n\n def __repr__(self) -> str:\n format_string = self.__class__.__name__ + \"(\"\n\n if isinstance(self.transforms, Sequence):\n for t in self.transforms:\n format_string += \"\\n\"\n format_string += \" {0}\".format(t)\n\n elif callable(self.transforms):\n format_string += \"{0})\".format(self.transforms)\n return format_string\n\n else:\n raise TypeError(\n f\"`transforms` has type {type(self.transforms)}, not callable\"\n )\n\n format_string += \"\\n)\"\n\n return format_string\n\n\nclass SwapKeys:\n \"\"\"Transform to set one key in a :mod:`torchgeo` sample :class:`dict` to another.\n\n Useful for testing autoencoders to predict their input.\n\n Attributes:\n from_key (str): Key for the value to set to ``to_key``.\n to_key (str): Key to set the value from ``from_key`` to.\n\n Args:\n from_key (str): Key for the value to set to ``to_key``.\n to_key (str): Key to set the value from ``from_key`` to.\n\n .. versionadded:: 0.22\n \"\"\"\n\n def __init__(self, from_key: str, to_key: str) -> None:\n self.from_key = from_key\n self.to_key = to_key\n\n def __call__(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n return self.forward(sample)\n\n def __repr__(self) -> str:\n return f\"{self.__class__.__name__}({self.from_key} -> {self.to_key})\"\n\n def forward(self, sample: Dict[str, Any]) -> Dict[str, Any]:\n \"\"\"Sets the ``to_key`` of ``sample`` to the ``from_key`` and returns.\n\n Args:\n sample (dict[str, ~typing.Any]): Sample dict from :mod:`torchgeo` containing ``from_key``.\n\n Returns:\n dict[str, ~typing.Any]: Sample with ``to_key`` set to the value of ``from_key``.\n \"\"\"\n sample[self.to_key] = sample[self.from_key]\n return sample\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Handles the loading of config files and checking paths.\n\nAttributes:\n DEFAULT_CONF_DIR_PATH (~pathlib.Path): Path to the default config directory.\n DEFAULT_CONFIG_NAME (str): Name of the default, example config.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"DEFAULT_CONF_DIR_PATH\",\n \"DEFAULT_CONFIG_NAME\",\n \"ToDefaultConfDir\",\n \"universal_path\",\n \"check_paths\",\n \"chdir_to_default\",\n \"load_configs\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport os\nfrom pathlib import Path\nfrom typing import Any, Dict, Optional, Tuple, Union\n\nimport yaml\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# Default values for the path to the config directory and config name.\nDEFAULT_CONF_DIR_PATH = Path(\"../../inbuilt_cfgs/\")\nDEFAULT_CONFIG_NAME: str = \"example_config.yml\"\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass ToDefaultConfDir:\n \"\"\"Changes to the default config directory. 
Switches back to the previous CWD on close.\"\"\"\n\n def __init__(self) -> None:\n self._cwd = os.getcwd()\n self._def_dir = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n\n def __enter__(self) -> None:\n os.chdir(self._def_dir)\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.chdir(self._cwd)\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef universal_path(path: Any) -> Path:\n \"\"\"Creates a :class:`~pathlib.Path` object from :class:`str` or :class:`~typing.Iterable` inputs.\n\n Args:\n path (~typing.Any): Representation of a path to convert to :class:`~pathlib.Path` object.\n\n Returns:\n ~pathlib.Path: :class:`~pathlib.Path` object of the input ``path``.\n \"\"\"\n if isinstance(path, Path):\n return path\n elif isinstance(path, str):\n return Path(path)\n else:\n return Path(*path)\n\n\ndef check_paths(\n config: Optional[Union[str, Path]] = None, use_default_conf_dir: bool = False\n) -> Tuple[str, Optional[str], Optional[Path]]:\n \"\"\"Checks the path given for the config.\n\n Args:\n config (str | ~pathlib.Path | None): Path to the config given from the CLI.\n use_default_conf_dir (bool): Assumes that ``config`` is in the default config directory if ``True``.\n\n Returns:\n tuple[str, ~typing.Optional[str], ~typing.Optional[~pathlib.Path]]: Tuple of the path for\n :func:`load_configs` to use, the config name and path to config.\n \"\"\"\n\n config_name: Optional[str] = None\n config_path: Optional[Path] = None\n\n if config is not None:\n p = Path(config)\n head = p.parent\n tail = p.name\n\n if str(head) != \"\":\n config_path = head\n else:\n config_path = Path(\"\")\n\n config_name = tail\n\n # Overwrites the config path if option found in args regardless of -c args.\n if use_default_conf_dir:\n if config_path is not None:\n print(\n \"Warning: Config path specified with `--default_config_dir` option.\"\n + \"\\nDefault config directory path will be used.\"\n )\n config_path = None\n\n # If no config_path, change directory to the default config directory.\n if config_path is None:\n config_name = chdir_to_default(config_name)\n\n # Check the config specified exists at the path given. If not, assume it's in the default directory.\n else:\n if config_name is None or not (config_path / config_name).exists():\n config_name = chdir_to_default(config_name)\n\n path = config_name\n if config_path is not None and config_path != Path(\"\"):\n path = str(config_path / config_name)\n\n return path, config_name, config_path\n\n\ndef chdir_to_default(config_name: Optional[str] = None) -> str:\n \"\"\"Changes the current working directory to the default config directory.\n\n Args:\n config_name (str): Optional; Name of the config in the default directory. Defaults to None.\n\n Returns:\n str: :data:`DEFAULT_CONFIG_NAME` if ``config_name`` not in default directory. ``config_name`` if it does exist.\n \"\"\"\n\n this_abs_path = (Path(__file__).parent / DEFAULT_CONF_DIR_PATH).resolve()\n os.chdir(this_abs_path)\n\n if config_name is None or not Path(config_name).exists():\n return DEFAULT_CONFIG_NAME\n else:\n return config_name\n\n\ndef load_configs(master_config_path: Union[str, Path]) -> Tuple[Dict[str, Any], ...]:\n \"\"\"Loads the master config from ``YAML``.
Finds other config paths within and loads them.\n\n Args:\n master_config_path (str): Path to the master config ``YAML`` file.\n\n Returns:\n tuple[dict[str, ~typing.Any], ...]: Master config and any other configs found from paths in the master config.\n \"\"\"\n\n def yaml_load(path: Union[str, Path]) -> Any:\n \"\"\"Loads ``YAML`` file from path as dict.\n Args:\n path(str | ~pathlib.Path): Path to ``YAML`` file.\n\n Returns:\n yml_file (dict): YAML file loaded as dict.\n \"\"\"\n with open(path) as f:\n return yaml.safe_load(f)\n\n def aux_config_load(paths: Dict[str, str]) -> Dict[str, Dict[str, Any]]:\n \"\"\"Loads and returns config files from YAML as dicts.\n\n Args:\n paths (dict[str, str]): Dictionary mapping config names to paths to their ``YAML`` files.\n\n Returns:\n dict[str, dict[str, ~typing.Any]]: Config dictionaries loaded from ``YAML`` from paths.\n \"\"\"\n configs = {}\n for _config_name in paths.keys():\n # Loads config from YAML as dict.\n configs[_config_name] = yaml_load(paths[_config_name])\n return configs\n\n # First loads the master config.\n master_config = yaml_load(master_config_path)\n\n # Gets the paths for the other configs from master config.\n config_paths = master_config[\"dir\"][\"configs\"]\n\n # Loads and returns the other configs along with master config.\n return master_config, aux_config_load(config_paths)\n" } } }, @@ -16477,7 +16830,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "1638571b204aa2f69961e415636a25e0ca68d85bf766720f75399e3be0888136" + "equalIndicator/v1": "cf86cd430e943c26d97c35bdcc12893b38054edc1c0c8287951c16dc029c5966" }, "properties": { "ideaSeverity": "ERROR" @@ -16495,16 +16848,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/__depreciated.py", + "uri": "minerva/models/fcn.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 11619, + "charLength": 17321, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module for redundant model classes.\"\"\"\n# TODO: Consider removing redundant models.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom collections import OrderedDict\nfrom typing import Any, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom minerva.utils.utils import check_len\n\nfrom .core import MinervaModel, get_output_shape\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MLP(MinervaModel):\n \"\"\"Simple class to construct a Multi-Layer Perceptron (MLP).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. Designed for use with PyTorch functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n input_size (int): Size of the input vector to the network.\n output_size (int): Size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Series of values for the size of each hidden layer within the network.\n Also determines the number of layers other than the required input and output layers.\n network (torch.nn.Sequential): The actual neural network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (int): Optional; Size of the input vector to the network.\n n_classes (int): Optional; Number of classes in input data.\n Determines the size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Optional; Series of values for the size of each hidden layer\n within the network.
Also determines the number of layers other than the required input and output layers.\n \"\"\"\n\n def __init__(\n self,\n criterion: Optional[Any] = None,\n input_size: int = 288,\n n_classes: int = 8,\n hidden_sizes: Union[Tuple[int, ...], List[int], int] = (256, 144),\n ) -> None:\n super(MLP, self).__init__(\n criterion=criterion, input_size=(input_size,), n_classes=n_classes\n )\n\n if isinstance(hidden_sizes, int):\n hidden_sizes = (hidden_sizes,)\n self.hidden_sizes = hidden_sizes\n\n self._layers: OrderedDict[str, Module] = OrderedDict()\n\n # Constructs layers of the network based on the input size, the hidden sizes and the number of classes.\n for i in range(len(hidden_sizes)):\n if i == 0:\n self._layers[\"Linear-0\"] = nn.Linear(input_size, hidden_sizes[i])\n else:\n self._layers[f\"Linear-{i}\"] = nn.Linear(\n hidden_sizes[i - 1], hidden_sizes[i]\n )\n\n # Adds ReLu activation after every linear layer.\n self._layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n # Adds the final classification layer.\n self._layers[\"Classification\"] = nn.Linear(hidden_sizes[-1], n_classes)\n\n # Constructs network from the OrderedDict of layers\n self.network = nn.Sequential(self._layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the network.\n\n Can be called directly as a method of :class:`MLP` (e.g. ``model.forward()``)\n or when data is parsed to :class:`MLP` (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor. Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n z = self.network(x)\n assert isinstance(z, Tensor)\n return z\n\n\nclass CNN(MinervaModel):\n \"\"\"Simple class to construct a Convolutional Neural Network (CNN).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. 
Designed for use with :mod:`torch` functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n flattened_size (int): Length of the vector resulting from the flattening of the output from the convolutional\n network.\n conv_net (torch.nn.Sequential): Convolutional network of the model.\n fc_net (torch.nn.Sequential): Fully connected network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in input data.\n features (tuple[int] | list[int]): Optional; Series of values defining the number of feature maps.\n The length of the list is also used to determine the number of convolutional layers\n in ``conv_net``.\n conv_kernel_size (int | tuple[int, ...]): Optional; Size of all convolutional kernels\n for all channels and layers.\n conv_stride (int | tuple[int, ...]): Optional; Size of all convolutional stride lengths\n for all channels and layers.\n max_kernel_size (int | tuple[int, ...]): Optional; Size of all max-pooling kernels\n for all channels and layers.\n max_stride (int | tuple[int, ...]): Optional; Size of all max-pooling stride lengths\n for all channels and layers.\n \"\"\"\n\n def __init__(\n self,\n criterion,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n n_classes: int = 8,\n features: Union[Tuple[int, ...], List[int]] = (2, 1, 1),\n fc_sizes: Union[Tuple[int, ...], List[int]] = (128, 64),\n conv_kernel_size: Union[int, Tuple[int, ...]] = 3,\n conv_stride: Union[int, Tuple[int, ...]] = 1,\n max_kernel_size: Union[int, Tuple[int, ...]] = 2,\n max_stride: Union[int, Tuple[int, ...]] = 2,\n conv_do: bool = True,\n fc_do: bool = True,\n p_conv_do: float = 0.1,\n p_fc_do: float = 0.5,\n ) -> None:\n super(CNN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n self._conv_layers: OrderedDict[str, Module] = OrderedDict()\n self._fc_layers: OrderedDict[str, Module] = OrderedDict()\n\n # Checks that the kernel sizes and strides match the number of layers defined by features.\n _conv_kernel_size: Sequence[int] = check_len(conv_kernel_size, features)\n _conv_stride: Sequence[int] = check_len(conv_stride, features)\n\n # Constructs the convolutional layers determined by the number of input channels and the features of these.\n assert self.input_size is not None\n for i in range(len(features)):\n if i == 0:\n self._conv_layers[\"Conv-0\"] = nn.Conv2d(\n self.input_size[0],\n features[i],\n _conv_kernel_size[0],\n stride=_conv_stride[0],\n )\n else:\n self._conv_layers[f\"Conv-{i}\"] = nn.Conv2d(\n features[i - 1],\n features[i],\n _conv_kernel_size[i],\n stride=_conv_stride[i],\n )\n\n # Each convolutional layer is followed by max-pooling layer and ReLu activation.\n self._conv_layers[f\"MaxPool-{i}\"] = nn.MaxPool2d(\n kernel_size=max_kernel_size, stride=max_stride\n )\n self._conv_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if conv_do:\n self._conv_layers[f\"DropOut-{i}\"] = nn.Dropout(p_conv_do)\n\n # Construct the convolutional network from the dict of layers.\n self.conv_net = nn.Sequential(self._conv_layers)\n\n # Calculate the input of the Linear layer by sending some fake data through the network\n # and getting the shape of the output.\n out_shape = get_output_shape(self.conv_net, self.input_size)\n\n if type(out_shape) is int: # pragma: no cover\n self.flattened_size = out_shape\n elif 
isinstance(out_shape, Iterable):\n # Calculate the flattened size of the output from the convolutional network.\n self.flattened_size = int(np.prod(list(out_shape)))\n\n # Constructs the fully connected layers determined by the number of input channels and the features of these.\n for i in range(len(fc_sizes)):\n if i == 0:\n self._fc_layers[\"Linear-0\"] = nn.Linear(\n self.flattened_size, fc_sizes[i]\n )\n else:\n self._fc_layers[f\"Linear-{i}\"] = nn.Linear(fc_sizes[i - 1], fc_sizes[i])\n\n # Each fully connected layer is followed by a ReLu activation.\n self._fc_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if fc_do:\n self._fc_layers[f\"DropOut-{i}\"] = nn.Dropout(p_fc_do)\n\n # Add classification layer.\n assert self.n_classes is not None\n self._fc_layers[\"Classification\"] = nn.Linear(fc_sizes[-1], self.n_classes)\n\n # Create fully connected network.\n self.fc_net = nn.Sequential(self._fc_layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the convolutional network and then the fully connected network.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n # Inputs the data into the convolutional network.\n conv_out = self.conv_net(x)\n\n # Output from convolutional network is flattened and input to the fully connected network for classification.\n z = self.fc_net(conv_out.view(-1, self.flattened_size))\n assert isinstance(z, Tensor)\n return z\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Fully Convolutional Network (FCN) models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n__all__ = [\n \"FCN\",\n \"DCN\",\n \"FCN8ResNet18\",\n \"FCN8ResNet34\",\n \"FCN8ResNet50\",\n \"FCN8ResNet101\",\n \"FCN8ResNet152\",\n \"FCN16ResNet18\",\n \"FCN16ResNet34\",\n \"FCN16ResNet50\",\n \"FCN32ResNet18\",\n \"FCN32ResNet34\",\n \"FCN32ResNet50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Dict, Literal, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\n\nfrom .core import MinervaBackbone, MinervaModel, bilinear_init, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass FCN(MinervaBackbone):\n \"\"\"Base Fully Convolutional Network (FCN) class to be subclassed by FCN variants described in the FCN paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Subclasses :class:`~models.MinervaModel`.\n\n Attributes:\n backbone_name (str): Optional; Name of the backbone within this module to use for the FCN.\n decoder_variant (str): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. 
See the FCN paper for details on these variants.\n backbone (~torch.nn.Module): Backbone of the FCN that takes the imagery input and\n extracts learned representations.\n decoder (~torch.nn.Module): Decoder that takes the learned representations from the backbone encoder\n and de-convolves to output a classification segmentation mask.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in data to be classified.\n batch_size (int): Optional; Number of samples in each batch supplied to the network.\n Only needed for Decoder, not DCN.\n backbone_weight_path (str): Optional; Path to pre-trained weights for the backbone to be loaded.\n freeze_backbone (bool): Freezes the weights on the backbone to prevent end-to-end training\n if using a pre-trained backbone.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the backbone packed up into a dict.\n \"\"\"\n\n backbone_name: str = \"ResNet18\"\n decoder_variant: Literal[\"32\", \"16\", \"8\"] = \"32\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, ...] = (4, 256, 256),\n n_classes: int = 8,\n backbone_weight_path: Optional[str] = None,\n freeze_backbone: bool = False,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(FCN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n # Initialises the selected Minerva backbone.\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, n_classes=n_classes, encoder=True, **backbone_kwargs # type: ignore\n )\n\n # Loads and grafts the pre-trained weights on top of the backbone if the path is provided.\n if backbone_weight_path is not None: # pragma: no cover\n self.backbone.load_state_dict(torch.load(backbone_weight_path))\n\n # Freezes the weights of backbone to avoid end-to-end training.\n if freeze_backbone:\n self.backbone.requires_grad_(False)\n\n # Determines the output shape of the backbone so the correct input shape is known\n # for the succeeding layers of the network.\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n self.decoder = DCN(\n in_channel=backbone_out_shape[0],\n n_classes=n_classes,\n variant=self.decoder_variant,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the FCN by using the forward methods of the backbone and\n feeding its output into the forward for the decoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n \"\"\"\n z = self.backbone(x)\n z = self.decoder(z)\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass DCN(MinervaModel):\n \"\"\"Generic DCN defined by the FCN paper. Can construct the DCN32, DCN16 or DCN8 variants defined in the paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Attributes:\n variant (~typing.Literal['32', '16', '8']): Defines which DCN variant this object is, altering the\n layers constructed and the computational graph.
Will be either ``'32'``, ``'16'`` or ``'8'``.\n See the FCN paper for details on these variants.\n n_classes (int): Number of classes in dataset. Defines number of output classification channels.\n relu (~torch.nn.ReLU): Rectified Linear Unit (ReLU) activation layer to be used throughout the network.\n Conv1x1 (~torch.nn.Conv2d): First Conv1x1 layer acting as input to the network from the final output of\n the encoder and common to all variants.\n bn1 (~torch.nn.BatchNorm2d): First batch norm layer common to all variants that comes after Conv1x1.\n DC32 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 32 for DCN32 variant.\n dbn32 (~torch.nn.BatchNorm2d): Batch norm layer after DC32.\n Conv1x1_x3 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n third layer from the ResNet encoder.\n DC2 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN16 & DCN8 variants.\n dbn2 (~torch.nn.BatchNorm2d): Batch norm layer after DC2.\n DC16 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 16 for DCN16 variant.\n dbn16 (~torch.nn.BatchNorm2d): Batch norm layer after DC16.\n Conv1x1_x2 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n second layer from the ResNet encoder.\n DC4 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN8 variant.\n dbn4 (~torch.nn.BatchNorm2d): Batch norm layer after DC4.\n DC8 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 8 for DCN8 variant.\n dbn8 (~torch.nn.BatchNorm2d): Batch norm layer after DC8.\n\n Args:\n in_channel (int): Optional; Number of channels in the input layer of the network.\n Should match the number of output channels (likely feature maps) from the encoder.\n n_classes (int): Optional; Number of classes in dataset. Defines number of output classification channels.\n variant (~typing.Literal['32', '16', '8']): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. 
See the FCN paper for details on these variants.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n\n def __init__(\n self,\n in_channel: int = 512,\n n_classes: int = 21,\n variant: Literal[\"32\", \"16\", \"8\"] = \"32\",\n ) -> None:\n super(DCN, self).__init__(n_classes=n_classes)\n self.variant: Literal[\"32\", \"16\", \"8\"] = variant\n\n assert type(self.n_classes) is int\n\n # Common to all variants.\n self.relu = nn.ReLU(inplace=True)\n self.Conv1x1 = nn.Conv2d(in_channel, self.n_classes, kernel_size=(1, 1))\n self.bn1 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"32\":\n self.DC32 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(64, 64),\n stride=(32, 32),\n dilation=1,\n padding=(16, 16),\n )\n self.DC32.weight.data = bilinear_init(self.n_classes, self.n_classes, 64)\n self.dbn32 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant in (\"16\", \"8\"):\n self.Conv1x1_x3 = nn.Conv2d(\n int(in_channel / 2), self.n_classes, kernel_size=(1, 1)\n )\n self.DC2 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC2.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn2 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"16\":\n self.DC16 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(32, 32),\n stride=(16, 16),\n dilation=1,\n padding=(8, 8),\n )\n self.DC16.weight.data = bilinear_init(self.n_classes, self.n_classes, 32)\n self.dbn16 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"8\":\n self.Conv1x1_x2 = nn.Conv2d(\n int(in_channel / 4), self.n_classes, kernel_size=(1, 1)\n )\n\n self.DC4 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC4.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn4 = nn.BatchNorm2d(self.n_classes)\n\n self.DC8 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(16, 16),\n stride=(8, 8),\n dilation=1,\n padding=(4, 4),\n )\n self.DC8.weight.data = bilinear_init(self.n_classes, self.n_classes, 16)\n self.dbn8 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n def forward(self, x: Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]) -> Tensor:\n \"\"\"Performs a forward pass of the decoder. Depending on DCN variant, will take multiple inputs\n throughout pass from the encoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. 
``model()``).\n\n Args:\n x (tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]): Input data to network.\n Should be from a backbone that supports output at multiple points e.g ResNet.\n\n Returns:\n ~torch.Tensor: Segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n # Unpack outputs from the ResNet layers.\n x4, x3, x2, *_ = x\n\n # All DCNs have a common 1x1 Conv input block.\n z = self.bn1(self.relu(self.Conv1x1(x4)))\n\n # If DCN32, forward pass through DC32 and DBN32 and return output.\n if self.variant == \"32\":\n z = self.dbn32(self.relu(self.DC32(z)))\n assert isinstance(z, Tensor)\n return z\n\n # Common Conv1x1 layer to DCN16 & DCN8.\n x3 = self.bn1(self.relu(self.Conv1x1_x3(x3)))\n z = self.dbn2(self.relu(self.DC2(z)))\n\n z = z + x3\n\n # If DCN16, forward pass through DCN16 and DBN16 and return output.\n if self.variant == \"16\":\n z = self.dbn16(self.relu(self.DC16(z)))\n assert isinstance(z, Tensor)\n return z\n\n # If DCN8, continue through remaining layers to output.\n else:\n x2 = self.bn1(self.relu(self.Conv1x1_x2(x2)))\n z = self.dbn4(self.relu(self.DC4(z)))\n\n z = z + x2\n\n z = self.dbn8(self.relu(self.DC8(z)))\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass FCN32ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"32\"\n\n\nclass FCN16ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"16\"\n\n\nclass FCN8ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = 
\"8\"\n\n\nclass FCN8ResNet101(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet101` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet101\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet152(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet152` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet152\"\n decoder_variant = \"8\"\n" }, "sourceLanguage": "Python" }, @@ -16512,9 +16865,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 11619, + "charLength": 17321, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module for redundant model classes.\"\"\"\n# TODO: Consider removing redundant models.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom collections import OrderedDict\nfrom typing import Any, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom minerva.utils.utils import check_len\n\nfrom .core import MinervaModel, get_output_shape\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MLP(MinervaModel):\n \"\"\"Simple class to construct a Multi-Layer Perceptron (MLP).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. 
Designed for use with PyTorch functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n input_size (int): Size of the input vector to the network.\n output_size (int): Size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Series of values for the size of each hidden layers within the network.\n Also determines the number of layers other than the required input and output layers.\n network (torch.nn.Sequential): The actual neural network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (int): Optional; Size of the input vector to the network.\n n_classes (int): Optional; Number of classes in input data.\n Determines the size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Optional; Series of values for the size of each hidden layers\n within the network. Also determines the number of layers other than the required input and output layers.\n \"\"\"\n\n def __init__(\n self,\n criterion: Optional[Any] = None,\n input_size: int = 288,\n n_classes: int = 8,\n hidden_sizes: Union[Tuple[int, ...], List[int], int] = (256, 144),\n ) -> None:\n super(MLP, self).__init__(\n criterion=criterion, input_size=(input_size,), n_classes=n_classes\n )\n\n if isinstance(hidden_sizes, int):\n hidden_sizes = (hidden_sizes,)\n self.hidden_sizes = hidden_sizes\n\n self._layers: OrderedDict[str, Module] = OrderedDict()\n\n # Constructs layers of the network based on the input size, the hidden sizes and the number of classes.\n for i in range(len(hidden_sizes)):\n if i == 0:\n self._layers[\"Linear-0\"] = nn.Linear(input_size, hidden_sizes[i])\n else:\n self._layers[f\"Linear-{i}\"] = nn.Linear(\n hidden_sizes[i - 1], hidden_sizes[i]\n )\n\n # Adds ReLu activation after every linear layer.\n self._layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n # Adds the final classification layer.\n self._layers[\"Classification\"] = nn.Linear(hidden_sizes[-1], n_classes)\n\n # Constructs network from the OrderedDict of layers\n self.network = nn.Sequential(self._layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the network.\n\n Can be called directly as a method of :class:`MLP` (e.g. ``model.forward()``)\n or when data is parsed to :class:`MLP` (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor. Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n z = self.network(x)\n assert isinstance(z, Tensor)\n return z\n\n\nclass CNN(MinervaModel):\n \"\"\"Simple class to construct a Convolutional Neural Network (CNN).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. 
Designed for use with :mod:`torch` functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n flattened_size (int): Length of the vector resulting from the flattening of the output from the convolutional\n network.\n conv_net (torch.nn.Sequential): Convolutional network of the model.\n fc_net (torch.nn.Sequential): Fully connected network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in input data.\n features (tuple[int] | list[int]): Optional; Series of values defining the number of feature maps.\n The length of the list is also used to determine the number of convolutional layers\n in ``conv_net``.\n conv_kernel_size (int | tuple[int, ...]): Optional; Size of all convolutional kernels\n for all channels and layers.\n conv_stride (int | tuple[int, ...]): Optional; Size of all convolutional stride lengths\n for all channels and layers.\n max_kernel_size (int | tuple[int, ...]): Optional; Size of all max-pooling kernels\n for all channels and layers.\n max_stride (int | tuple[int, ...]): Optional; Size of all max-pooling stride lengths\n for all channels and layers.\n \"\"\"\n\n def __init__(\n self,\n criterion,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n n_classes: int = 8,\n features: Union[Tuple[int, ...], List[int]] = (2, 1, 1),\n fc_sizes: Union[Tuple[int, ...], List[int]] = (128, 64),\n conv_kernel_size: Union[int, Tuple[int, ...]] = 3,\n conv_stride: Union[int, Tuple[int, ...]] = 1,\n max_kernel_size: Union[int, Tuple[int, ...]] = 2,\n max_stride: Union[int, Tuple[int, ...]] = 2,\n conv_do: bool = True,\n fc_do: bool = True,\n p_conv_do: float = 0.1,\n p_fc_do: float = 0.5,\n ) -> None:\n super(CNN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n self._conv_layers: OrderedDict[str, Module] = OrderedDict()\n self._fc_layers: OrderedDict[str, Module] = OrderedDict()\n\n # Checks that the kernel sizes and strides match the number of layers defined by features.\n _conv_kernel_size: Sequence[int] = check_len(conv_kernel_size, features)\n _conv_stride: Sequence[int] = check_len(conv_stride, features)\n\n # Constructs the convolutional layers determined by the number of input channels and the features of these.\n assert self.input_size is not None\n for i in range(len(features)):\n if i == 0:\n self._conv_layers[\"Conv-0\"] = nn.Conv2d(\n self.input_size[0],\n features[i],\n _conv_kernel_size[0],\n stride=_conv_stride[0],\n )\n else:\n self._conv_layers[f\"Conv-{i}\"] = nn.Conv2d(\n features[i - 1],\n features[i],\n _conv_kernel_size[i],\n stride=_conv_stride[i],\n )\n\n # Each convolutional layer is followed by max-pooling layer and ReLu activation.\n self._conv_layers[f\"MaxPool-{i}\"] = nn.MaxPool2d(\n kernel_size=max_kernel_size, stride=max_stride\n )\n self._conv_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if conv_do:\n self._conv_layers[f\"DropOut-{i}\"] = nn.Dropout(p_conv_do)\n\n # Construct the convolutional network from the dict of layers.\n self.conv_net = nn.Sequential(self._conv_layers)\n\n # Calculate the input of the Linear layer by sending some fake data through the network\n # and getting the shape of the output.\n out_shape = get_output_shape(self.conv_net, self.input_size)\n\n if type(out_shape) is int: # pragma: no cover\n self.flattened_size = out_shape\n elif 
isinstance(out_shape, Iterable):\n # Calculate the flattened size of the output from the convolutional network.\n self.flattened_size = int(np.prod(list(out_shape)))\n\n # Constructs the fully connected layers determined by the number of input channels and the features of these.\n for i in range(len(fc_sizes)):\n if i == 0:\n self._fc_layers[\"Linear-0\"] = nn.Linear(\n self.flattened_size, fc_sizes[i]\n )\n else:\n self._fc_layers[f\"Linear-{i}\"] = nn.Linear(fc_sizes[i - 1], fc_sizes[i])\n\n # Each fully connected layer is followed by a ReLu activation.\n self._fc_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if fc_do:\n self._fc_layers[f\"DropOut-{i}\"] = nn.Dropout(p_fc_do)\n\n # Add classification layer.\n assert self.n_classes is not None\n self._fc_layers[\"Classification\"] = nn.Linear(fc_sizes[-1], self.n_classes)\n\n # Create fully connected network.\n self.fc_net = nn.Sequential(self._fc_layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the convolutional network and then the fully connected network.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n # Inputs the data into the convolutional network.\n conv_out = self.conv_net(x)\n\n # Output from convolutional network is flattened and input to the fully connected network for classification.\n z = self.fc_net(conv_out.view(-1, self.flattened_size))\n assert isinstance(z, Tensor)\n return z\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Fully Convolutional Network (FCN) models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n__all__ = [\n \"FCN\",\n \"DCN\",\n \"FCN8ResNet18\",\n \"FCN8ResNet34\",\n \"FCN8ResNet50\",\n \"FCN8ResNet101\",\n \"FCN8ResNet152\",\n \"FCN16ResNet18\",\n \"FCN16ResNet34\",\n \"FCN16ResNet50\",\n \"FCN32ResNet18\",\n \"FCN32ResNet34\",\n \"FCN32ResNet50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Dict, Literal, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\n\nfrom .core import MinervaBackbone, MinervaModel, bilinear_init, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass FCN(MinervaBackbone):\n \"\"\"Base Fully Convolutional Network (FCN) class to be subclassed by FCN variants described in the FCN paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Subclasses :class:`~models.MinervaModel`.\n\n Attributes:\n backbone_name (str): Optional; Name of the backbone within this module to use for the FCN.\n decoder_variant (str): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. 
See the FCN paper for details on these variants.\n backbone (~torch.nn.Module): Backbone of the FCN that takes the imagery input and\n extracts learned representations.\n decoder (~torch.nn.Module): Decoder that takes the learned representations from the backbone encoder\n and de-convolves to output a classification segmentation mask.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in data to be classified.\n batch_size (int): Optional; Number of samples in each batch supplied to the network.\n Only needed for Decoder, not DCN.\n backbone_weight_path (str): Optional; Path to pre-trained weights for the backbone to be loaded.\n freeze_backbone (bool): Freezes the weights on the backbone to prevent end-to-end training\n if using a pre-trained backbone.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the backbone packed up into a dict.\n \"\"\"\n\n backbone_name: str = \"ResNet18\"\n decoder_variant: Literal[\"32\", \"16\", \"8\"] = \"32\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, ...] = (4, 256, 256),\n n_classes: int = 8,\n backbone_weight_path: Optional[str] = None,\n freeze_backbone: bool = False,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(FCN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n # Initialises the selected Minerva backbone.\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, n_classes=n_classes, encoder=True, **backbone_kwargs # type: ignore\n )\n\n # Loads and grafts the pre-trained weights on top of the backbone if the path is provided.\n if backbone_weight_path is not None: # pragma: no cover\n self.backbone.load_state_dict(torch.load(backbone_weight_path))\n\n # Freezes the weights of backbone to avoid end-to-end training.\n if freeze_backbone:\n self.backbone.requires_grad_(False)\n\n # Determines the output shape of the backbone so the correct input shape is known\n # for the succeeding layers of the network.\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n self.decoder = DCN(\n in_channel=backbone_out_shape[0],\n n_classes=n_classes,\n variant=self.decoder_variant,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the FCN by using the forward methods of the backbone and\n feeding its output into the forward for the decoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n \"\"\"\n z = self.backbone(x)\n z = self.decoder(z)\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass DCN(MinervaModel):\n \"\"\"Generic DCN defined by the FCN paper. Can construct the DCN32, DCN16 or DCN8 variants defined in the paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Attributes:\n variant (~typing.Literal['32', '16', '8']): Defines which DCN variant this object is, altering the\n layers constructed and the computational graph.
Will be either ``'32'``, ``'16'`` or ``'8'``.\n See the FCN paper for details on these variants.\n n_classes (int): Number of classes in dataset. Defines number of output classification channels.\n relu (~torch.nn.ReLU): Rectified Linear Unit (ReLU) activation layer to be used throughout the network.\n Conv1x1 (~torch.nn.Conv2d): First Conv1x1 layer acting as input to the network from the final output of\n the encoder and common to all variants.\n bn1 (~torch.nn.BatchNorm2d): First batch norm layer common to all variants that comes after Conv1x1.\n DC32 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 32 for DCN32 variant.\n dbn32 (~torch.nn.BatchNorm2d): Batch norm layer after DC32.\n Conv1x1_x3 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n third layer from the ResNet encoder.\n DC2 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN16 & DCN8 variants.\n dbn2 (~torch.nn.BatchNorm2d): Batch norm layer after DC2.\n DC16 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 16 for DCN16 variant.\n dbn16 (~torch.nn.BatchNorm2d): Batch norm layer after DC16.\n Conv1x1_x2 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n second layer from the ResNet encoder.\n DC4 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN8 variant.\n dbn4 (~torch.nn.BatchNorm2d): Batch norm layer after DC4.\n DC8 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 8 for DCN8 variant.\n dbn8 (~torch.nn.BatchNorm2d): Batch norm layer after DC8.\n\n Args:\n in_channel (int): Optional; Number of channels in the input layer of the network.\n Should match the number of output channels (likely feature maps) from the encoder.\n n_classes (int): Optional; Number of classes in dataset. Defines number of output classification channels.\n variant (~typing.Literal['32', '16', '8']): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. 
See the FCN paper for details on these variants.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n\n def __init__(\n self,\n in_channel: int = 512,\n n_classes: int = 21,\n variant: Literal[\"32\", \"16\", \"8\"] = \"32\",\n ) -> None:\n super(DCN, self).__init__(n_classes=n_classes)\n self.variant: Literal[\"32\", \"16\", \"8\"] = variant\n\n assert type(self.n_classes) is int\n\n # Common to all variants.\n self.relu = nn.ReLU(inplace=True)\n self.Conv1x1 = nn.Conv2d(in_channel, self.n_classes, kernel_size=(1, 1))\n self.bn1 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"32\":\n self.DC32 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(64, 64),\n stride=(32, 32),\n dilation=1,\n padding=(16, 16),\n )\n self.DC32.weight.data = bilinear_init(self.n_classes, self.n_classes, 64)\n self.dbn32 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant in (\"16\", \"8\"):\n self.Conv1x1_x3 = nn.Conv2d(\n int(in_channel / 2), self.n_classes, kernel_size=(1, 1)\n )\n self.DC2 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC2.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn2 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"16\":\n self.DC16 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(32, 32),\n stride=(16, 16),\n dilation=1,\n padding=(8, 8),\n )\n self.DC16.weight.data = bilinear_init(self.n_classes, self.n_classes, 32)\n self.dbn16 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"8\":\n self.Conv1x1_x2 = nn.Conv2d(\n int(in_channel / 4), self.n_classes, kernel_size=(1, 1)\n )\n\n self.DC4 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC4.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn4 = nn.BatchNorm2d(self.n_classes)\n\n self.DC8 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(16, 16),\n stride=(8, 8),\n dilation=1,\n padding=(4, 4),\n )\n self.DC8.weight.data = bilinear_init(self.n_classes, self.n_classes, 16)\n self.dbn8 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n def forward(self, x: Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]) -> Tensor:\n \"\"\"Performs a forward pass of the decoder. Depending on DCN variant, will take multiple inputs\n throughout pass from the encoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. 
``model()``).\n\n Args:\n x (tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]): Input data to network.\n Should be from a backbone that supports output at multiple points e.g. ResNet.\n\n Returns:\n ~torch.Tensor: Segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n # Unpack outputs from the ResNet layers.\n x4, x3, x2, *_ = x\n\n # All DCNs have a common 1x1 Conv input block.\n z = self.bn1(self.relu(self.Conv1x1(x4)))\n\n # If DCN32, forward pass through DC32 and DBN32 and return output.\n if self.variant == \"32\":\n z = self.dbn32(self.relu(self.DC32(z)))\n assert isinstance(z, Tensor)\n return z\n\n # Common Conv1x1 layer to DCN16 & DCN8.\n x3 = self.bn1(self.relu(self.Conv1x1_x3(x3)))\n z = self.dbn2(self.relu(self.DC2(z)))\n\n z = z + x3\n\n # If DCN16, forward pass through DC16 and DBN16 and return output.\n if self.variant == \"16\":\n z = self.dbn16(self.relu(self.DC16(z)))\n assert isinstance(z, Tensor)\n return z\n\n # If DCN8, continue through remaining layers to output.\n else:\n x2 = self.bn1(self.relu(self.Conv1x1_x2(x2)))\n z = self.dbn4(self.relu(self.DC4(z)))\n\n z = z + x2\n\n z = self.dbn8(self.relu(self.DC8(z)))\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass FCN32ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"32\"\n\n\nclass FCN16ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"16\"\n\n\nclass FCN8ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = 
\"8\"\n\n\nclass FCN8ResNet101(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet101` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet101\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet152(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet152` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet152\"\n decoder_variant = \"8\"\n" } } }, @@ -16527,7 +16880,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "bbcfa76d26b002e561bb8b0bb5e3619d383d6e9848f54dd4710a315e7dc9f6cc" + "equalIndicator/v1": "86a296c896ae7f7c507dd6748e296a78771b444eb81387cbfff6cfb064d24d92" }, "properties": { "ideaSeverity": "ERROR" @@ -16545,16 +16898,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/metrics.py", + "uri": "minerva/models/__depreciated.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 13942, + "charLength": 12014, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to calculate the metrics of a model's fitting.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaMetrics\",\n \"SPMetrics\",\n \"SSLMetrics\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom abc import ABC\nfrom typing import Any, Dict, List, Tuple\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaMetrics(ABC):\n \"\"\"Abstract class for metric logging within the :mod:`minerva` framework.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in 
each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n metric_types: List[str] = []\n special_metric_types: List[str] = []\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n **params,\n ) -> None:\n super(MinervaMetrics, self).__init__()\n\n self.n_batches = n_batches\n self.batch_size = batch_size\n self.data_size = data_size\n\n self.model_type = params.get(\"model_type\", \"scene_classifier\")\n self.sample_pairs = params.get(\"sample_pairs\", False)\n\n self.modes = params.get(\"modes\", [\"train\", \"val\", \"test\"])\n\n if self.sample_pairs:\n self.metric_types += self.special_metric_types\n\n # Creates a dict to hold the loss and accuracy results from training, validation and testing.\n self.metrics: Dict[str, Any] = {}\n for mode in self.modes:\n for metric in self.metric_types:\n self.metrics[f\"{mode}_{metric}\"] = {\"x\": [], \"y\": []}\n\n def __call__(self, mode: str, logs: Dict[str, Any]) -> None:\n self.calc_metrics(mode, logs)\n\n @abc.abstractmethod\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n pass # pragma: no cover\n\n @abc.abstractmethod\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n pass # pragma: no cover\n\n @property\n def get_metrics(self) -> Dict[str, Any]:\n \"\"\"Get the ``metrics`` dictionary.\n\n Returns:\n dict[str, Any]: Metrics dictionary.\n \"\"\"\n return self.metrics\n\n def get_sub_metrics(\n self, pattern: Tuple[str, ...] 
= (\"train\", \"val\")\n ) -> Dict[str, Any]:\n \"\"\"Gets a subset of the metrics dictionary with keys containing strings in the pattern.\n\n Useful for getting the train and validation metrics for plotting for example.\n\n Args:\n pattern (tuple[str, ...]): Optional; Strings to pattern match the metric keys to be returned.\n Defaults to ``(\"train\", \"val\")``.\n\n Returns:\n dict[str, ~typing.Any]: Subset of ``metrics`` with keys that contained strings in ``pattern``.\n \"\"\"\n sub_metrics = {}\n for key in self.metrics.keys():\n if key.split(\"_\")[0] in pattern:\n sub_metrics[key] = self.metrics[key]\n\n return sub_metrics\n\n @abc.abstractmethod\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n pass # pragma: no cover\n\n\nclass SPMetrics(MinervaMetrics):\n \"\"\"Metric logging for supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types: List[str] = [\"loss\", \"acc\", \"miou\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n **params,\n ) -> None:\n super(SPMetrics, self).__init__(\n n_batches, batch_size, data_size, model_type=model_type\n )\n\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n if logs.get(\"total_miou\") is not None:\n self.metrics[f\"{mode}_miou\"][\"y\"].append(\n logs[\"total_miou\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_miou\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | Loss: {} | Accuracy: 
{}%\".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.model_type == \"segmentation\":\n msg += \" | mIoU: {}\".format(self.metrics[f\"{mode}_miou\"][\"y\"][epoch_no])\n\n msg += \"\\n\"\n print(msg)\n\n\nclass SSLMetrics(MinervaMetrics):\n \"\"\"Metric logging for self-supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types = [\"loss\", \"acc\", \"top5_acc\"]\n special_metric_types = [\"collapse_level\", \"euc_dist\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n sample_pairs: bool = False,\n **params,\n ) -> None:\n super(SSLMetrics, self).__init__(\n n_batches,\n batch_size,\n data_size,\n model_type=model_type,\n sample_pairs=sample_pairs,\n )\n\n def calc_metrics(self, mode: str, logs) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"y\"].append(logs[\"collapse_level\"])\n self.metrics[f\"{mode}_euc_dist\"][\"y\"].append(\n logs[\"euc_dist\"] / self.n_batches[mode]\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_top5_acc\"][\"x\"].append(epoch_no + 1)\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_euc_dist\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | 
Loss: {} | Accuracy: {}% | Top5 Accuracy: {}% \".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n self.metrics[f\"{mode}_top5_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.sample_pairs and mode == \"train\":\n msg += \"\\n\"\n\n msg += \"| Collapse Level: {}%\".format(\n self.metrics[f\"{mode}_collapse_level\"][\"y\"][epoch_no] * 100.0\n )\n msg += \"| Avg. Euclidean Distance: {}\".format(\n self.metrics[f\"{mode}_euc_dist\"][\"y\"][epoch_no]\n )\n\n msg += \"\\n\"\n print(msg)\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module for redundant model classes.\"\"\"\n# TODO: Consider removing redundant models.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom collections import OrderedDict\nfrom typing import Any, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom minerva.utils.utils import check_len\n\nfrom .core import MinervaModel, get_output_shape\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MLP(MinervaModel):\n \"\"\"Simple class to construct a Multi-Layer Perceptron (MLP).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. 
Designed for use with PyTorch functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n input_size (int): Size of the input vector to the network.\n output_size (int): Size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Series of values for the size of each hidden layer within the network.\n Also determines the number of layers other than the required input and output layers.\n network (torch.nn.Sequential): The actual neural network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (int): Optional; Size of the input vector to the network.\n n_classes (int): Optional; Number of classes in input data.\n Determines the size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Optional; Series of values for the size of each hidden layer\n within the network. Also determines the number of layers other than the required input and output layers.\n \"\"\"\n\n def __init__(\n self,\n criterion: Optional[Any] = None,\n input_size: int = 288,\n n_classes: int = 8,\n hidden_sizes: Union[Tuple[int, ...], List[int], int] = (256, 144),\n ) -> None:\n super(MLP, self).__init__(\n criterion=criterion, input_size=(input_size,), n_classes=n_classes\n )\n\n if isinstance(hidden_sizes, int):\n hidden_sizes = (hidden_sizes,)\n self.hidden_sizes = hidden_sizes\n\n self._layers: OrderedDict[str, Module] = OrderedDict()\n\n # Constructs layers of the network based on the input size, the hidden sizes and the number of classes.\n for i in range(len(hidden_sizes)):\n if i == 0:\n self._layers[\"Linear-0\"] = nn.Linear(input_size, hidden_sizes[i])\n else:\n self._layers[f\"Linear-{i}\"] = nn.Linear(\n hidden_sizes[i - 1], hidden_sizes[i]\n )\n\n # Adds ReLU activation after every linear layer.\n self._layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n # Adds the final classification layer.\n self._layers[\"Classification\"] = nn.Linear(hidden_sizes[-1], n_classes)\n\n # Constructs network from the OrderedDict of layers.\n self.network = nn.Sequential(self._layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the network.\n\n Can be called directly as a method of :class:`MLP` (e.g. ``model.forward()``)\n or when data is parsed to :class:`MLP` (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n z = self.network(x)\n assert isinstance(z, Tensor)\n return z\n\n\nclass CNN(MinervaModel):\n \"\"\"Simple class to construct a Convolutional Neural Network (CNN).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. 
Designed for use with :mod:`torch` functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n flattened_size (int): Length of the vector resulting from the flattening of the output from the convolutional\n network.\n conv_net (torch.nn.Sequential): Convolutional network of the model.\n fc_net (torch.nn.Sequential): Fully connected network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in input data.\n features (tuple[int] | list[int]): Optional; Series of values defining the number of feature maps.\n The length of the list is also used to determine the number of convolutional layers\n in ``conv_net``.\n conv_kernel_size (int | tuple[int, ...]): Optional; Size of all convolutional kernels\n for all channels and layers.\n conv_stride (int | tuple[int, ...]): Optional; Size of all convolutional stride lengths\n for all channels and layers.\n max_kernel_size (int | tuple[int, ...]): Optional; Size of all max-pooling kernels\n for all channels and layers.\n max_stride (int | tuple[int, ...]): Optional; Size of all max-pooling stride lengths\n for all channels and layers.\n \"\"\"\n\n def __init__(\n self,\n criterion,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n n_classes: int = 8,\n features: Union[Tuple[int, ...], List[int]] = (2, 1, 1),\n fc_sizes: Union[Tuple[int, ...], List[int]] = (128, 64),\n conv_kernel_size: Union[int, Tuple[int, ...]] = 3,\n conv_stride: Union[int, Tuple[int, ...]] = 1,\n max_kernel_size: Union[int, Tuple[int, ...]] = 2,\n max_stride: Union[int, Tuple[int, ...]] = 2,\n conv_do: bool = True,\n fc_do: bool = True,\n p_conv_do: float = 0.1,\n p_fc_do: float = 0.5,\n ) -> None:\n super(CNN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n self._conv_layers: OrderedDict[str, Module] = OrderedDict()\n self._fc_layers: OrderedDict[str, Module] = OrderedDict()\n\n # Checks that the kernel sizes and strides match the number of layers defined by features.\n _conv_kernel_size: Sequence[int] = check_len(conv_kernel_size, features)\n _conv_stride: Sequence[int] = check_len(conv_stride, features)\n\n # Constructs the convolutional layers determined by the number of input channels and the features of these.\n assert self.input_size is not None\n for i in range(len(features)):\n if i == 0:\n self._conv_layers[\"Conv-0\"] = nn.Conv2d(\n self.input_size[0],\n features[i],\n _conv_kernel_size[0],\n stride=_conv_stride[0],\n )\n else:\n self._conv_layers[f\"Conv-{i}\"] = nn.Conv2d(\n features[i - 1],\n features[i],\n _conv_kernel_size[i],\n stride=_conv_stride[i],\n )\n\n # Each convolutional layer is followed by max-pooling layer and ReLu activation.\n self._conv_layers[f\"MaxPool-{i}\"] = nn.MaxPool2d(\n kernel_size=max_kernel_size, stride=max_stride\n )\n self._conv_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if conv_do:\n self._conv_layers[f\"DropOut-{i}\"] = nn.Dropout(p_conv_do)\n\n # Construct the convolutional network from the dict of layers.\n self.conv_net = nn.Sequential(self._conv_layers)\n\n # Calculate the input of the Linear layer by sending some fake data through the network\n # and getting the shape of the output.\n out_shape = get_output_shape(self.conv_net, self.input_size)\n\n if type(out_shape) is int: # pragma: no cover\n self.flattened_size = out_shape\n elif 
isinstance(out_shape, Iterable):\n # Calculate the flattened size of the output from the convolutional network.\n self.flattened_size = int(np.prod(list(out_shape)))\n\n # Constructs the fully connected layers determined by the number of input channels and the features of these.\n for i in range(len(fc_sizes)):\n if i == 0:\n self._fc_layers[\"Linear-0\"] = nn.Linear(\n self.flattened_size, fc_sizes[i]\n )\n else:\n self._fc_layers[f\"Linear-{i}\"] = nn.Linear(fc_sizes[i - 1], fc_sizes[i])\n\n # Each fully connected layer is followed by a ReLu activation.\n self._fc_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if fc_do:\n self._fc_layers[f\"DropOut-{i}\"] = nn.Dropout(p_fc_do)\n\n # Add classification layer.\n assert self.n_classes is not None\n self._fc_layers[\"Classification\"] = nn.Linear(fc_sizes[-1], self.n_classes)\n\n # Create fully connected network.\n self.fc_net = nn.Sequential(self._fc_layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the convolutional network and then the fully connected network.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n # Inputs the data into the convolutional network.\n conv_out = self.conv_net(x)\n\n # Output from convolutional network is flattened and input to the fully connected network for classification.\n z = self.fc_net(conv_out.view(-1, self.flattened_size))\n assert isinstance(z, Tensor)\n return z\n" }, "sourceLanguage": "Python" }, @@ -16562,9 +16915,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 13942, + "charLength": 12014, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to calculate the metrics of a model's fitting.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaMetrics\",\n \"SPMetrics\",\n \"SSLMetrics\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom abc import ABC\nfrom typing import Any, Dict, List, Tuple\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaMetrics(ABC):\n \"\"\"Abstract class for metric logging within the :mod:`minerva` framework.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n metric_types: List[str] = []\n special_metric_types: List[str] = []\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n **params,\n ) -> None:\n super(MinervaMetrics, self).__init__()\n\n self.n_batches = n_batches\n self.batch_size = batch_size\n self.data_size = data_size\n\n self.model_type = params.get(\"model_type\", \"scene_classifier\")\n self.sample_pairs = params.get(\"sample_pairs\", False)\n\n self.modes = params.get(\"modes\", [\"train\", \"val\", \"test\"])\n\n if self.sample_pairs:\n self.metric_types += self.special_metric_types\n\n # Creates a dict to hold the loss and accuracy results from training, validation and testing.\n self.metrics: Dict[str, Any] = {}\n for mode in self.modes:\n for metric in self.metric_types:\n self.metrics[f\"{mode}_{metric}\"] = {\"x\": [], \"y\": []}\n\n def __call__(self, mode: str, logs: Dict[str, Any]) -> None:\n self.calc_metrics(mode, logs)\n\n @abc.abstractmethod\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n pass # pragma: no cover\n\n @abc.abstractmethod\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n pass # pragma: no cover\n\n @property\n def 
get_metrics(self) -> Dict[str, Any]:\n \"\"\"Get the ``metrics`` dictionary.\n\n Returns:\n dict[str, Any]: Metrics dictionary.\n \"\"\"\n return self.metrics\n\n def get_sub_metrics(\n self, pattern: Tuple[str, ...] = (\"train\", \"val\")\n ) -> Dict[str, Any]:\n \"\"\"Gets a subset of the metrics dictionary with keys containing strings in the pattern.\n\n Useful for getting the train and validation metrics for plotting for example.\n\n Args:\n pattern (tuple[str, ...]): Optional; Strings to pattern match the metric keys to be returned.\n Defaults to ``(\"train\", \"val\")``.\n\n Returns:\n dict[str, ~typing.Any]: Subset of ``metrics`` with keys that contained strings in ``pattern``.\n \"\"\"\n sub_metrics = {}\n for key in self.metrics.keys():\n if key.split(\"_\")[0] in pattern:\n sub_metrics[key] = self.metrics[key]\n\n return sub_metrics\n\n @abc.abstractmethod\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n pass # pragma: no cover\n\n\nclass SPMetrics(MinervaMetrics):\n \"\"\"Metric logging for supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types: List[str] = [\"loss\", \"acc\", \"miou\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n **params,\n ) -> None:\n super(SPMetrics, self).__init__(\n n_batches, batch_size, data_size, model_type=model_type\n )\n\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n if logs.get(\"total_miou\") is not None:\n self.metrics[f\"{mode}_miou\"][\"y\"].append(\n logs[\"total_miou\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_miou\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n 
\"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | Loss: {} | Accuracy: {}%\".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.model_type == \"segmentation\":\n msg += \" | mIoU: {}\".format(self.metrics[f\"{mode}_miou\"][\"y\"][epoch_no])\n\n msg += \"\\n\"\n print(msg)\n\n\nclass SSLMetrics(MinervaMetrics):\n \"\"\"Metric logging for self-supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types = [\"loss\", \"acc\", \"top5_acc\"]\n special_metric_types = [\"collapse_level\", \"euc_dist\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n sample_pairs: bool = False,\n **params,\n ) -> None:\n super(SSLMetrics, self).__init__(\n n_batches,\n batch_size,\n data_size,\n model_type=model_type,\n sample_pairs=sample_pairs,\n )\n\n def calc_metrics(self, mode: str, logs) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"y\"].append(logs[\"collapse_level\"])\n self.metrics[f\"{mode}_euc_dist\"][\"y\"].append(\n logs[\"euc_dist\"] / self.n_batches[mode]\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_top5_acc\"][\"x\"].append(epoch_no + 1)\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_euc_dist\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, 
epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | Loss: {} | Accuracy: {}% | Top5 Accuracy: {}% \".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n self.metrics[f\"{mode}_top5_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.sample_pairs and mode == \"train\":\n msg += \"\\n\"\n\n msg += \"| Collapse Level: {}%\".format(\n self.metrics[f\"{mode}_collapse_level\"][\"y\"][epoch_no] * 100.0\n )\n msg += \"| Avg. Euclidean Distance: {}\".format(\n self.metrics[f\"{mode}_euc_dist\"][\"y\"][epoch_no]\n )\n\n msg += \"\\n\"\n print(msg)\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module for redundant model classes.\"\"\"\n# TODO: Consider removing redundant models.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom collections import OrderedDict\nfrom typing import Any, Iterable, List, Optional, Sequence, Tuple, Union\n\nimport numpy as np\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom minerva.utils.utils import check_len\n\nfrom .core import MinervaModel, get_output_shape\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MLP(MinervaModel):\n \"\"\"Simple class to construct a Multi-Layer Perceptron (MLP).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. 
Designed for use with PyTorch functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n input_size (int): Size of the input vector to the network.\n output_size (int): Size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Series of values for the size of each hidden layer within the network.\n Also determines the number of layers other than the required input and output layers.\n network (torch.nn.Sequential): The actual neural network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (int): Optional; Size of the input vector to the network.\n n_classes (int): Optional; Number of classes in input data.\n Determines the size of the output vector of the network.\n hidden_sizes (tuple[int] | list[int]): Optional; Series of values for the size of each hidden layer\n within the network. Also determines the number of layers other than the required input and output layers.\n \"\"\"\n\n def __init__(\n self,\n criterion: Optional[Any] = None,\n input_size: int = 288,\n n_classes: int = 8,\n hidden_sizes: Union[Tuple[int, ...], List[int], int] = (256, 144),\n ) -> None:\n super(MLP, self).__init__(\n criterion=criterion, input_size=(input_size,), n_classes=n_classes\n )\n\n if isinstance(hidden_sizes, int):\n hidden_sizes = (hidden_sizes,)\n self.hidden_sizes = hidden_sizes\n\n self._layers: OrderedDict[str, Module] = OrderedDict()\n\n # Constructs layers of the network based on the input size, the hidden sizes and the number of classes.\n for i in range(len(hidden_sizes)):\n if i == 0:\n self._layers[\"Linear-0\"] = nn.Linear(input_size, hidden_sizes[i])\n else:\n self._layers[f\"Linear-{i}\"] = nn.Linear(\n hidden_sizes[i - 1], hidden_sizes[i]\n )\n\n # Adds ReLU activation after every linear layer.\n self._layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n # Adds the final classification layer.\n self._layers[\"Classification\"] = nn.Linear(hidden_sizes[-1], n_classes)\n\n # Constructs network from the OrderedDict of layers.\n self.network = nn.Sequential(self._layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the network.\n\n Can be called directly as a method of :class:`MLP` (e.g. ``model.forward()``)\n or when data is parsed to :class:`MLP` (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n z = self.network(x)\n assert isinstance(z, Tensor)\n return z\n\n\nclass CNN(MinervaModel):\n \"\"\"Simple class to construct a Convolutional Neural Network (CNN).\n\n Inherits from :class:`~torch.nn.Module` and :class:`MinervaModel`. 
Designed for use with :mod:`torch` functionality.\n\n Should be used in tandem with :class:`~trainer.Trainer`.\n\n Attributes:\n flattened_size (int): Length of the vector resulting from the flattening of the output from the convolutional\n network.\n conv_net (torch.nn.Sequential): Convolutional network of the model.\n fc_net (torch.nn.Sequential): Fully connected network of the model.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in input data.\n features (tuple[int] | list[int]): Optional; Series of values defining the number of feature maps.\n The length of the list is also used to determine the number of convolutional layers\n in ``conv_net``.\n conv_kernel_size (int | tuple[int, ...]): Optional; Size of all convolutional kernels\n for all channels and layers.\n conv_stride (int | tuple[int, ...]): Optional; Size of all convolutional stride lengths\n for all channels and layers.\n max_kernel_size (int | tuple[int, ...]): Optional; Size of all max-pooling kernels\n for all channels and layers.\n max_stride (int | tuple[int, ...]): Optional; Size of all max-pooling stride lengths\n for all channels and layers.\n \"\"\"\n\n def __init__(\n self,\n criterion,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n n_classes: int = 8,\n features: Union[Tuple[int, ...], List[int]] = (2, 1, 1),\n fc_sizes: Union[Tuple[int, ...], List[int]] = (128, 64),\n conv_kernel_size: Union[int, Tuple[int, ...]] = 3,\n conv_stride: Union[int, Tuple[int, ...]] = 1,\n max_kernel_size: Union[int, Tuple[int, ...]] = 2,\n max_stride: Union[int, Tuple[int, ...]] = 2,\n conv_do: bool = True,\n fc_do: bool = True,\n p_conv_do: float = 0.1,\n p_fc_do: float = 0.5,\n ) -> None:\n super(CNN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n self._conv_layers: OrderedDict[str, Module] = OrderedDict()\n self._fc_layers: OrderedDict[str, Module] = OrderedDict()\n\n # Checks that the kernel sizes and strides match the number of layers defined by features.\n _conv_kernel_size: Sequence[int] = check_len(conv_kernel_size, features)\n _conv_stride: Sequence[int] = check_len(conv_stride, features)\n\n # Constructs the convolutional layers determined by the number of input channels and the features of these.\n assert self.input_size is not None\n for i in range(len(features)):\n if i == 0:\n self._conv_layers[\"Conv-0\"] = nn.Conv2d(\n self.input_size[0],\n features[i],\n _conv_kernel_size[0],\n stride=_conv_stride[0],\n )\n else:\n self._conv_layers[f\"Conv-{i}\"] = nn.Conv2d(\n features[i - 1],\n features[i],\n _conv_kernel_size[i],\n stride=_conv_stride[i],\n )\n\n # Each convolutional layer is followed by max-pooling layer and ReLu activation.\n self._conv_layers[f\"MaxPool-{i}\"] = nn.MaxPool2d(\n kernel_size=max_kernel_size, stride=max_stride\n )\n self._conv_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if conv_do:\n self._conv_layers[f\"DropOut-{i}\"] = nn.Dropout(p_conv_do)\n\n # Construct the convolutional network from the dict of layers.\n self.conv_net = nn.Sequential(self._conv_layers)\n\n # Calculate the input of the Linear layer by sending some fake data through the network\n # and getting the shape of the output.\n out_shape = get_output_shape(self.conv_net, self.input_size)\n\n if type(out_shape) is int: # pragma: no cover\n self.flattened_size = out_shape\n elif 
isinstance(out_shape, Iterable):\n # Calculate the flattened size of the output from the convolutional network.\n self.flattened_size = int(np.prod(list(out_shape)))\n\n # Constructs the fully connected layers determined by the number of input channels and the features of these.\n for i in range(len(fc_sizes)):\n if i == 0:\n self._fc_layers[\"Linear-0\"] = nn.Linear(\n self.flattened_size, fc_sizes[i]\n )\n else:\n self._fc_layers[f\"Linear-{i}\"] = nn.Linear(fc_sizes[i - 1], fc_sizes[i])\n\n # Each fully connected layer is followed by a ReLu activation.\n self._fc_layers[f\"ReLu-{i}\"] = nn.ReLU()\n\n if fc_do:\n self._fc_layers[f\"DropOut-{i}\"] = nn.Dropout(p_fc_do)\n\n # Add classification layer.\n assert self.n_classes is not None\n self._fc_layers[\"Classification\"] = nn.Linear(fc_sizes[-1], self.n_classes)\n\n # Create fully connected network.\n self.fc_net = nn.Sequential(self._fc_layers)\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the convolutional network and then the fully connected network.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: Tensor of the likelihoods the network places on the input ``x`` being of each class.\n \"\"\"\n # Inputs the data into the convolutional network.\n conv_out = self.conv_net(x)\n\n # Output from convolutional network is flattened and input to the fully connected network for classification.\n z = self.fc_net(conv_out.view(-1, self.flattened_size))\n assert isinstance(z, Tensor)\n return z\n" } } }, @@ -16577,7 +16930,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "b56316699501b0a9a1700c0b7657d1494e8ea7294c423220d5679af9237367f6" + "equalIndicator/v1": "6d280a8d1d1b0e20922c046a90996b40f10e95e7db7d9307b5b3e1785267d1c3" }, "properties": { "ideaSeverity": "ERROR" @@ -16595,16 +16948,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/utils/runner.py", + "uri": "minerva/utils/visutils.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 18579, + "charLength": 51811, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle generic functionality for running :mod:`minerva` scripts.\n\nAttributes:\n GENERIC_PARSER (~argparse.ArgumentParser): A standard argparser with arguments for use in :mod:`minerva`.\n Can be used as the basis for a user defined extended argparser.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"GENERIC_PARSER\",\n \"WandbConnectionManager\",\n \"setup_wandb_run\",\n \"config_env_vars\",\n \"config_args\",\n \"distributed_run\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nimport signal\nimport subprocess\nfrom argparse import Namespace\nfrom typing import Any, Callable, Optional, Union\n\nimport requests\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom wandb.sdk.lib import RunDisabled\nfrom wandb.sdk.wandb_run import Run\n\nimport wandb\nfrom minerva.utils import CONFIG, MASTER_PARSER, utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# ---+ CLI +--------------------------------------------------------------+\nGENERIC_PARSER = argparse.ArgumentParser(parents=[MASTER_PARSER])\n\nGENERIC_PARSER.add_argument(\n \"-o\",\n \"--override\",\n dest=\"override\",\n action=\"store_true\",\n help=\"Override config arguments with the CLI arguments where they overlap.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--seed\",\n dest=\"seed\",\n type=int,\n default=42,\n help=\"Set seed number\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-name\",\n dest=\"model_name\",\n type=str,\n help=\"Name of model.\"\n + \" Sub-string before hyphen is taken as model class name.\"\n + \" Sub-string past hyphen can be used to differentiate between versions.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-type\",\n dest=\"model_type\",\n type=str,\n help=\"Type of model. Should be 'segmentation', 'ssl', 'siamese', 'scene_classifier' or 'mlp'\",\n choices=(\"segmentation\", \"ssl\", \"siamese\", \"scene_classifier\", \"mlp\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--max_epochs\",\n dest=\"max_epochs\",\n type=int,\n default=100,\n help=\"Maximum number of training epochs.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--batch-size\",\n dest=\"batch_size\",\n type=int,\n default=8,\n help=\"Number of samples in each batch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--lr\",\n dest=\"lr\",\n type=float,\n default=0.01,\n help=\"Learning rate of the optimiser.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--optim-func\",\n dest=\"optim_func\",\n type=str,\n default=\"SGD\",\n help=\"Name of the optimiser to use. 
Only works for ``torch`` losses\"\n + \"(or if ``module`` is specified in the ``optim_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--loss-func\",\n dest=\"loss_func\",\n type=str,\n default=\"CrossEntropyLoss\",\n help=\"Name of the loss function to use. Only works for ``torch`` losses\"\n + \"(or if ``module`` is specified in the ``loss_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--pre-train\",\n dest=\"pre_train\",\n action=\"store_true\",\n help=\"Sets experiment type to pre-train. Will save model to cache at end of training.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--fine-tune\",\n dest=\"fine_tune\",\n action=\"store_true\",\n help=\"Sets experiment type to fine-tune. Will load pre-trained backbone from file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--eval\",\n dest=\"eval\",\n action=\"store_true\",\n help=\"Sets experiment type to pre-train. Will save model to cache at end of training.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--balance\",\n dest=\"balance\",\n action=\"store_true\",\n help=\"Activates class balancing.\"\n + \" Depending on `model_type`, this will either be via sampling or weighting of the loss function.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--class-elim\",\n dest=\"elim\",\n action=\"store_true\",\n help=\"Eliminates classes that are specified in config but not present in the data.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--sample-pairs\",\n dest=\"sample_pairs\",\n action=\"store_true\",\n help=\"Use paired sampling. E.g. For Siamese models.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-model\",\n dest=\"save_model\",\n type=str,\n default=False,\n help=\"Whether to save the model at end of testing. Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically save the model to file.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not save the model and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--run-tensorboard\",\n dest=\"run_tensorboard\",\n type=str,\n default=False,\n help=\"Whether to run the Tensorboard logs at end of testing. Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically locate and run the logs on a local browser.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not save the model and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-plots-no\",\n dest=\"save\",\n action=\"store_false\",\n help=\"Plots created will not be saved to file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--show-plots\",\n dest=\"show\",\n action=\"store_true\",\n help=\"Show plots created in a window.\"\n + \" Warning: Do not use with a terminal-less operation, e.g. 
SLURM.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--print-dist\",\n dest=\"p_dist\",\n action=\"store_true\",\n help=\"Print the distribution of classes within the data to `stdout`.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--plot-last-epoch\",\n dest=\"plot_last_epoch\",\n action=\"store_true\",\n help=\"Plot the results from the final validation epoch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log\",\n dest=\"wandb_log\",\n action=\"store_true\",\n help=\"Activate Weights and Biases logging.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--project_name\",\n dest=\"project\",\n type=str,\n help=\"Name of the Weights and Biases project this experiment belongs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-entity\",\n dest=\"entity\",\n type=str,\n help=\"The Weights and Biases entity to send runs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-dir\",\n dest=\"wandb_dir\",\n type=str,\n default=\"./wandb\",\n help=\"Where to store the Weights and Biases logs locally.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log-all\",\n dest=\"log_all\",\n action=\"store_true\",\n help=\"Will log each process on Weights and Biases. Otherwise, logging will be performed from the master process.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--knn-k\",\n dest=\"knn_k\",\n type=int,\n default=200,\n help=\"Top k most similar images used to predict the image for KNN validation.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--val-freq\",\n dest=\"val_freq\",\n type=int,\n default=5,\n help=\"Perform a validation epoch with KNN for every ``val_freq``\"\n + \"training epochs for SSL or Siamese models.\",\n)\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass WandbConnectionManager:\n \"\"\"Checks for a connection to :mod:`wandb`. 
If not, sets :mod:`wandb` to offline during context.\"\"\"\n\n def __init__(self) -> None:\n try:\n requests.head(\"http://www.wandb.ai/\", timeout=0.1)\n self._on = True\n except requests.ConnectionError:\n self._on = False\n\n def __enter__(self) -> None:\n if self._on:\n os.environ[\"WANDB_MODE\"] = \"online\"\n else:\n os.environ[\"WANDB_MODE\"] = \"offline\"\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.environ[\"WANDB_MODE\"] = \"online\"\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef _handle_sigusr1(signum, frame) -> None: # pragma: no cover\n subprocess.Popen( # nosec B602\n f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}',\n shell=True,\n )\n exit()\n\n\ndef _handle_sigterm(signum, frame) -> None: # pragma: no cover\n pass\n\n\ndef setup_wandb_run(gpu: int, args: Namespace) -> Optional[Union[Run, RunDisabled]]:\n \"\"\"Sets up a :mod:`wandb` logger for either every process, the master process or not if not logging.\n\n Note:\n ``args`` must contain these keys:\n\n * ``wandb_log`` (bool): Activate :mod:`wandb` logging.\n * | ``log_all`` (bool): :mod:`wandb` logging on every process if ``True``.\n | Only log on master process if ``False``.\n * ``entity`` (str): :mod:`wandb` entity where to send runs to.\n * ``project`` (str): Name of the :mod:`wandb` project this experiment belongs to.\n * ``world_size`` (int): Total number of processes across the experiment.\n\n Args:\n gpu (int): Local process (GPU) number.\n args (~argparse.Namespace): CLI arguments from :mod:`argparse`.\n\n Returns:\n ~wandb.sdk.wandb_run.Run | ~wandb.sdk.lib.RunDisabled | None: The :mod:`wandb` run object\n for this process or ``None`` if ``log_all=False`` and ``rank!=0``.\n \"\"\"\n run: Optional[Union[Run, RunDisabled]] = None\n if CONFIG.get(\"wandb_log\", False) or CONFIG.get(\"project\", None):\n try:\n if CONFIG.get(\"log_all\", False) and args.world_size > 1:\n run = wandb.init( # pragma: no cover\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n group=CONFIG.get(\"group\", \"DDP\"),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n else:\n if gpu == 0:\n run = wandb.init(\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n CONFIG[\"wandb_log\"] = True\n except wandb.UsageError: # type: ignore[attr-defined] # pragma: no cover\n print(\n \"wandb API Key has not been inited.\",\n \"\\nEither call wandb.login(key=[your_api_key]) or use `wandb login` in the shell.\",\n \"\\nOr if not using wandb, safely ignore this message.\",\n )\n else:\n print(\"Weights and Biases logging OFF\")\n\n return run\n\n\ndef config_env_vars(args: Namespace) -> Namespace:\n \"\"\"Finds SLURM environment variables (if they exist) and configures args accordingly.\n\n If SLURM variables are found in the environment variables, the arguments are configured for a SLURM job:\n\n * ``args.rank`` is set to the ``SLURM_NODEID * args.ngpus_per_node``.\n * ``args.world_size`` is set to ``SLURM_NNODES * args.ngpus_per_node``.\n * ``args.dist_url`` is set to ``tcp://{host_name}:58472``\n\n If SLURM variables are not detected, the arguments are configured for a single-node job:\n\n * ``args.rank=0``.\n * ``args.world_size=args.ngpus_per_node``.\n * ``args.dist_url = 
\"tcp://localhost:58472\"``.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_sized`` attributes.\n \"\"\"\n if \"SLURM_JOB_ID\" in os.environ: # pragma: no cover\n # Single-node and multi-node distributed training on SLURM cluster.\n # Requeue job on SLURM preemption.\n signal.signal(signal.SIGUSR1, _handle_sigusr1)\n signal.signal(signal.SIGTERM, _handle_sigterm)\n\n # Get SLURM variables.\n slurm_job_nodelist: Optional[str] = os.getenv(\"SLURM_JOB_NODELIST\")\n slurm_nodeid: Optional[str] = os.getenv(\"SLURM_NODEID\")\n slurm_nnodes: Optional[str] = os.getenv(\"SLURM_NNODES\")\n slurm_jobid: Optional[str] = os.getenv(\"SLURM_JOB_ID\")\n\n # Check that SLURM variables have been found.\n assert slurm_job_nodelist is not None\n assert slurm_nodeid is not None\n assert slurm_nnodes is not None\n assert slurm_jobid is not None\n\n # Find a common host name on all nodes.\n # Assume scontrol returns hosts in the same order on all nodes.\n cmd = \"scontrol show hostnames \" + slurm_job_nodelist\n stdout = subprocess.check_output(cmd.split())\n host_name = stdout.decode().splitlines()[0]\n args.rank = int(slurm_nodeid) * args.ngpus_per_node\n args.world_size = int(slurm_nnodes) * args.ngpus_per_node\n args.dist_url = f\"tcp://{host_name}:58472\"\n args.jobid = slurm_jobid\n\n else:\n # Single-node distributed training.\n args.rank = 0\n args.dist_url = \"tcp://localhost:58472\"\n args.world_size = args.ngpus_per_node\n args.jobid = None\n\n return args\n\n\ndef config_args(args: Namespace) -> Namespace:\n \"\"\"Prepare the arguments generated from the :mod:`argparse` CLI for the job run.\n\n * Finds and sets ``args.ngpus_per_node``;\n * updates the ``CONFIG`` with new arguments from the CLI;\n * sets the seeds from the seed found in ``CONFIG`` or from CLI;\n * uses :func:`config_env_vars` to determine the correct arguments for distributed computing jobs e.g. 
SLURM.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_sized`` attributes.\n \"\"\"\n args.ngpus_per_node = torch.cuda.device_count()\n\n # Convert CLI arguments to dict.\n args_dict = vars(args)\n\n # Find which CLI arguments are not in the config.\n new_args = {key: args_dict[key] for key in args_dict if key not in CONFIG}\n\n # Updates the config with new arguments from the CLI.\n CONFIG.update(new_args)\n\n # Overrides the arguments from the config with those of the CLI where they overlap.\n # WARNING: This will include the use of the default CLI arguments.\n if args_dict.get(\"override\"): # pragma: no cover\n updated_args = {\n key: args_dict[key]\n for key in args_dict\n if args_dict[key] != CONFIG[key] and args_dict[key] is not None\n }\n CONFIG.update(updated_args)\n\n # Get seed from config.\n seed = CONFIG.get(\"seed\", 42)\n\n # Set torch, numpy and inbuilt seeds for reproducibility.\n utils.set_seeds(seed)\n\n return config_env_vars(args)\n\n\ndef _run_preamble(\n gpu: int, run: Callable[[int, Namespace], Any], args: Namespace\n) -> None: # pragma: no cover\n # Calculates the global rank of this process.\n args.rank += gpu\n\n # Setups the `wandb` run for this process.\n args.wandb_run = setup_wandb_run(gpu, args)\n\n if args.world_size > 1:\n dist.init_process_group( # type: ignore[attr-defined]\n backend=\"gloo\",\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n print(f\"INITIALISED PROCESS ON {args.rank}\")\n\n if torch.cuda.is_available():\n torch.cuda.set_device(gpu)\n torch.backends.cudnn.benchmark = True # type: ignore\n\n # Start this process run.\n run(gpu, args)\n\n\ndef distributed_run(run: Callable[[int, Namespace], Any], args: Namespace) -> None:\n \"\"\"Runs the supplied function and arguments with distributed computing according to arguments.\n\n :func:`_run_preamble` adds some additional commands to initialise the process group for each run\n and allocating the GPU device number to use before running the supplied function.\n\n Note:\n ``args`` must contain the attributes ``rank``, ``world_size`` and ``dist_url``. 
These can be\n configured using :func:`config_env_vars` or :func:`config_args`.\n\n Args:\n run (~typing.Callable[[int, ~argparse.Namespace], ~typing.Any]): Function to run with distributed computing.\n args (~argparse.Namespace): Arguments for the run and to specify the variables for distributed computing.\n \"\"\"\n if args.world_size <= 1:\n # Setups up the `wandb` run.\n args.wandb_run = setup_wandb_run(0, args)\n\n # Run the experiment.\n run(0, args)\n\n else: # pragma: no cover\n try:\n mp.spawn(_run_preamble, (run, args), args.ngpus_per_node) # type: ignore[attr-defined]\n except KeyboardInterrupt:\n dist.destroy_process_group() # type: ignore[attr-defined]\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n# TODO: Reduce boilerplate.\n#\n\"\"\"Module to visualise .tiff images, label masks and results from the fitting of neural networks for remote sensing.\n\nAttributes:\n DATA_CONFIG (dict): Config defining the properties of the data used in the experiment.\n IMAGERY_CONFIG (dict): Config defining the properties of the imagery used in the experiment.\n DATA_DIR (list[str] | str): Path to directory holding dataset.\n BAND_IDS (dict): Band IDs and position in sample image.\n MAX_PIXEL_VALUE (int): Maximum pixel value (e.g. 
255 for 8-bit integer).\n WGS84 (~rasterio.crs.CRS): WGS84 co-ordinate reference system acting as a\n default :class:`~rasterio.crs.CRS` for transformations.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"DATA_CONFIG\",\n \"IMAGERY_CONFIG\",\n \"DATA_DIR\",\n \"BAND_IDS\",\n \"MAX_PIXEL_VALUE\",\n \"WGS84\",\n \"de_interlace\",\n \"dec_extent_to_deg\",\n \"get_mlp_cmap\",\n \"discrete_heatmap\",\n \"stack_rgb\",\n \"make_rgb_image\",\n \"labelled_rgb_image\",\n \"make_gif\",\n \"prediction_plot\",\n \"seg_plot\",\n \"plot_subpopulations\",\n \"plot_history\",\n \"make_confusion_matrix\",\n \"make_roc_curves\",\n \"plot_embedding\",\n \"format_plot_names\",\n \"plot_results\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport os\nimport random\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\nimport imageio\nimport matplotlib as mlp\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom alive_progress import alive_bar\nfrom matplotlib import offsetbox\nfrom matplotlib.colors import Colormap, ListedColormap\nfrom matplotlib.gridspec import GridSpec\nfrom matplotlib.image import AxesImage\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.transforms import Bbox\nfrom nptyping import Float, Int, NDArray, Shape\nfrom numpy.typing import ArrayLike\nfrom rasterio.crs import CRS\nfrom scipy import stats\nfrom sklearn.metrics import ConfusionMatrixDisplay\nfrom torchgeo.datasets.utils import BoundingBox\n\nfrom minerva.utils import AUX_CONFIGS, CONFIG, universal_path, utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\nDATA_CONFIG = AUX_CONFIGS.get(\"data_config\")\nIMAGERY_CONFIG = AUX_CONFIGS[\"imagery_config\"]\n\n# Path to directory holding dataset.\nDATA_DIR = CONFIG[\"dir\"][\"data\"]\n\n# Band IDs and position in sample image.\nBAND_IDS = IMAGERY_CONFIG[\"data_specs\"][\"band_ids\"]\n\n# Maximum pixel value (e.g. 
255 for 8-bit integer).\nMAX_PIXEL_VALUE = IMAGERY_CONFIG[\"data_specs\"][\"max_value\"]\n\nWGS84 = CRS.from_epsg(4326)\n\n# Automatically fixes the layout of the figures to accommodate the colour bar legends.\nplt.rcParams[\"figure.constrained_layout.use\"] = True\n\n# Increases DPI to avoid strange plotting errors for class heatmaps.\nplt.rcParams[\"figure.dpi\"] = 300\nplt.rcParams[\"savefig.dpi\"] = 300\n\n# Removes margin in x-axis of plots.\nplt.rcParams[\"axes.xmargin\"] = 0\n\n# Filters out all TensorFlow messages other than errors.\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n_MAX_SAMPLES = 25\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef de_interlace(x: Sequence[Any], f: int) -> NDArray[Any, Any]:\n \"\"\"Separates interlaced arrays, ``x`` at a frequency of ``f`` from each other.\n\n Args:\n x (~typing.Sequence[~typing.Any]): Array of data to be de-interlaced.\n f (int): Frequency at which interlacing occurs. Equivalent to number of sources interlaced together.\n\n Returns:\n ~numpy.ndarray[~typing.Any]: De-interlaced array. Each source array is now sequentially connected.\n \"\"\"\n new_x: List[NDArray[Any, Any]] = []\n for i in range(f):\n x_i = []\n for j in np.arange(start=i, stop=len(x), step=f):\n x_i.append(x[j])\n new_x.append(np.array(x_i).flatten())\n\n return np.array(new_x).flatten()\n\n\ndef dec_extent_to_deg(\n shape: Tuple[int, int],\n bounds: BoundingBox,\n src_crs: CRS,\n new_crs: CRS = WGS84,\n spacing: int = 32,\n) -> Tuple[Tuple[int, int, int, int], NDArray[Any, Float], NDArray[Any, Float]]:\n \"\"\"Gets the extent of the image with ``shape`` and with ``bounds`` in latitude, longitude of system ``new_crs``.\n\n Args:\n shape (tuple[int, int]): 2D shape of image to be used to define the extents of the composite image.\n bounds (~torchgeo.datasets.utils.BoundingBox): Object describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n src_crs (~rasterio.crs.CRS): Source co-ordinate reference system (CRS).\n new_crs (~rasterio.crs.CRS): Optional; The co-ordinate reference system (CRS) to transform to.\n spacing (int): Spacing of the lat - lon ticks.\n\n Returns:\n tuple[tuple[int, int, int, int], ~numpy.ndarray[float], ~numpy.ndarray[float]]:\n * The corners of the image in pixel co-ordinates e.g. 
``(0, 256, 0, 256)``.\n * The latitude extent of the image with ticks at intervals defined by ``spacing``.\n * The longitude extent of the image with ticks at intervals defined by ``spacing``.\n \"\"\"\n # Defines the 'extent' for a composite image based on the size of shape.\n extent = 0, shape[0], 0, shape[1]\n\n # Gets the co-ordinates of the corners of the image in decimal lat-lon.\n corners = utils.transform_coordinates(\n x=[bounds.minx, bounds.maxx],\n y=[bounds.miny, bounds.maxy],\n src_crs=src_crs,\n new_crs=new_crs,\n )\n\n # Creates a discrete mapping of the spaced ticks to latitude longitude extent of the image.\n lat_extent = np.around(\n np.linspace(\n start=corners[1][0],\n stop=corners[1][1],\n num=int(shape[0] / spacing) + 1,\n endpoint=True,\n ),\n decimals=3,\n )\n lon_extent = np.around(\n np.linspace(\n start=corners[0][0],\n stop=corners[0][1],\n num=int(shape[0] / spacing) + 1,\n endpoint=True,\n ),\n decimals=3,\n )\n\n return extent, lat_extent, lon_extent\n\n\ndef get_mlp_cmap(\n cmap_style: Optional[Union[Colormap, str]] = None, n_classes: Optional[int] = None\n) -> Optional[Colormap]:\n \"\"\"Creates a cmap from query\n\n Args:\n cmap_style (~matplotlib.colors.Colormap | str): Optional; :mod:`matplotlib` colourmap style to get.\n n_classes (int): Optional; Number of classes in data to assign colours to.\n\n Returns:\n ~matplotlib.colors.Colormap | None:\n * If ``cmap_style`` and ``n_classes`` provided, returns a :class:`~matplotlib.colors.ListedColormap` instance.\n * If ``cmap_style`` provided but no ``n_classes``, returns a :class:`~matplotlib.colors.Colormap` instance.\n * If neither arguments are provided, ``None`` is returned.\n \"\"\"\n cmap: Optional[Colormap] = None\n\n if cmap_style:\n if isinstance(cmap_style, str):\n cmap = mlp.colormaps[cmap_style] # type: ignore\n else:\n cmap = cmap_style\n\n if n_classes:\n assert isinstance(cmap, Colormap)\n cmap = cmap.resampled(n_classes) # type: ignore\n\n return cmap\n\n\ndef discrete_heatmap(\n data: NDArray[Shape[\"*, *\"], Int], # noqa: F722\n classes: Union[List[str], Tuple[str, ...]],\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n block_size: int = 32,\n) -> None:\n \"\"\"Plots a heatmap with a discrete colour bar. Designed for Radiant Earth MLHub 256x256 SENTINEL images.\n\n Args:\n data (~numpy.ndarray[int]): 2D Array of data to be plotted as a heat map.\n classes (list[str]): Optional; List of all possible class labels.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n block_size (int): Optional; Size of block image subdivision in pixels.\n \"\"\"\n # Initialises a figure.\n plt.figure()\n\n # Creates a cmap from query.\n cmap = get_mlp_cmap(cmap_style, len(classes))\n\n # Plots heatmap onto figure.\n heatmap = plt.imshow(data, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5) # type: ignore[arg-type]\n\n # Sets tick intervals to block size. 
Default 32 x 32.\n plt.xticks(np.arange(0, data.shape[0] + 1, block_size))\n plt.yticks(np.arange(0, data.shape[1] + 1, block_size))\n\n # Add grid overlay.\n plt.grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n # Plots colour bar onto figure.\n clb = plt.colorbar(heatmap, ticks=np.arange(0, len(classes)), shrink=0.77)\n\n # Sets colour bar ticks to class labels.\n clb.ax.set_yticklabels(classes)\n\n # Display figure.\n plt.show(block=False)\n\n # Close figure.\n plt.close()\n\n\ndef stack_rgb(\n image: NDArray[Shape[\"3, *, *\"], Float], # noqa: F722\n rgb: Dict[str, int] = BAND_IDS,\n max_value: int = MAX_PIXEL_VALUE,\n) -> NDArray[Shape[\"*, *, 3\"], Float]: # noqa: F722\n \"\"\"Stacks together red, green and blue image bands to create a RGB array.\n\n Args:\n image (~numpy.ndarray[float]): Image of separate channels to be normalised\n and reshaped into stacked RGB image.\n rgb (dict[str, int]): Optional; Dictionary of which channels in image are the R, G & B bands.\n max_value (int): Optional; The maximum pixel value in ``image``. e.g. for 8 bit this will be 255.\n\n Returns:\n ~numpy.ndarray[float]: Normalised and stacked red, green, blue arrays into RGB array.\n \"\"\"\n\n # Extract R, G, B bands from image and normalise.\n channels: List[Any] = []\n for channel in [\"R\", \"G\", \"B\"]:\n band = image[rgb[channel]] / max_value\n channels.append(band)\n\n # Stack together RGB bands.\n # Note that it has to be order BGR not RGB due to the order numpy stacks arrays.\n rgb_image: NDArray[Shape[\"3, *, *\"], Any] = np.dstack( # noqa: F722\n (channels[2], channels[1], channels[0])\n )\n assert isinstance(rgb_image, np.ndarray)\n return rgb_image\n\n\ndef make_rgb_image(\n image: NDArray[Shape[\"3, *, *\"], Float], # noqa: F722\n rgb: Dict[str, int],\n block_size: int = 32,\n) -> AxesImage:\n \"\"\"Creates an RGB image from a composition of red, green and blue bands.\n\n Args:\n image (~numpy.ndarray[int]): Array representing the image of shape ``(bands x height x width)``.\n rgb (dict[str, int]): Dictionary of channel numbers of R, G & B bands within ``image``.\n block_size (int): Optional; Size of block image sub-division in pixels.\n\n Returns:\n ~matplotlib.image.AxesImage: Plotted RGB image object.\n \"\"\"\n # Stack RGB image data together.\n rgb_image_array = stack_rgb(image, rgb)\n\n # Create RGB image.\n rgb_image = plt.imshow(rgb_image_array)\n\n # Sets tick intervals to block size. Default 32 x 32.\n plt.xticks(np.arange(0, rgb_image_array.shape[0] + 1, block_size))\n plt.yticks(np.arange(0, rgb_image_array.shape[1] + 1, block_size))\n\n # Add grid overlay.\n plt.grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n plt.show(block=False)\n\n return rgb_image\n\n\ndef labelled_rgb_image(\n image: NDArray[Shape[\"*, *, 3\"], Float], # noqa: F722\n mask: NDArray[Shape[\"*, *\"], Int], # noqa: F722\n bounds: BoundingBox,\n src_crs: CRS,\n path: Union[str, Path],\n name: str,\n classes: Union[List[str], Tuple[str, ...]],\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n new_crs: Optional[CRS] = WGS84,\n block_size: int = 32,\n alpha: float = 0.5,\n show: bool = True,\n save: bool = True,\n figdim: Tuple[Union[int, float], Union[int, float]] = (8.02, 10.32),\n) -> Path:\n \"\"\"Produces a layered image of an RGB image, and it's associated label mask heat map alpha blended on top.\n\n Args:\n image (~numpy.ndarray[int]): Array representing the image of shape ``(height x width x bands)``.\n mask (~numpy.ndarray[int]): Ground truth mask. 
Should be of shape (height x width) matching ``image``.\n bounds (~torchgeo.datasets.utils.BoundingBox): Object describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n src_crs (~rasterio.crs.CRS): Source co-ordinate reference system (CRS).\n path (str): Path to where to save created figure.\n name (str): Name of figure. Will be used for title and in the filename.\n classes (list[str]): Optional; List of all possible class labels.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n new_crs (~rasterio.crs.CRS): Optional; The co-ordinate reference system (CRS) to transform to.\n block_size (int): Optional; Size of block image subdivision in pixels.\n alpha (float): Optional; Fraction determining alpha blending of label mask.\n show (bool): Optional; Show the figure when plotted.\n save (bool): Optional; Save the figure to ``path``.\n figdim (tuple[int | float, int | float]): Optional; Figure (height, width) in inches.\n\n Returns:\n ~pathlib.Path: Path to figure save location.\n \"\"\"\n # Checks that the mask and image shapes will align.\n mask_shape: Tuple[int, int] = mask.shape # type: ignore[assignment]\n assert mask_shape == image.shape[:2]\n\n assert new_crs is not None\n\n # Gets the extent of the image in pixel, latitude and longitude dimensions.\n extent, lat_extent, lon_extent = dec_extent_to_deg(\n mask_shape,\n bounds=bounds,\n src_crs=src_crs,\n spacing=block_size,\n new_crs=new_crs,\n )\n\n # Initialises a figure.\n fig, ax1 = plt.subplots()\n\n # Create RGB image.\n ax1.imshow(image, extent=extent)\n\n # Creates a cmap from query.\n cmap = get_mlp_cmap(cmap_style, len(classes))\n\n # Plots heatmap onto figure.\n heatmap = ax1.imshow(\n mask, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5, extent=extent, alpha=alpha # type: ignore[arg-type]\n )\n\n # Sets tick intervals to standard 32x32 block size.\n ax1.set_xticks(np.arange(0, mask.shape[0] + 1, block_size))\n ax1.set_yticks(np.arange(0, mask.shape[1] + 1, block_size))\n\n # Creates a secondary x and y-axis to hold lat-lon.\n ax2 = ax1.twiny().twinx()\n\n # Plots an invisible line across the diagonal of the image to create the secondary axis for lat-lon.\n ax2.plot(\n lon_extent,\n lat_extent,\n \" \",\n clip_box=Bbox.from_extents(\n lon_extent[0], lat_extent[0], lon_extent[-1], lat_extent[-1]\n ),\n )\n\n # Set ticks for lat-lon.\n ax2.set_xticks(lon_extent)\n ax2.set_yticks(lat_extent)\n\n # Sets the limits of the secondary axis, so they should align with the primary.\n ax2.set_xlim(left=lon_extent[0], right=lon_extent[-1])\n ax2.set_ylim(top=lat_extent[-1], bottom=lat_extent[0])\n\n # Converts the decimal lat-lon into degrees, minutes, seconds to label the axis.\n lat_labels = utils.dec2deg(lat_extent, axis=\"lat\")\n lon_labels = utils.dec2deg(lon_extent, axis=\"lon\")\n\n # Sets the secondary axis tick labels.\n ax2.set_xticklabels(lon_labels, fontsize=11)\n ax2.set_yticklabels(lat_labels, fontsize=10, rotation=-30, ha=\"left\")\n\n # Add grid overlay.\n ax1.grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n # Plots colour bar onto figure.\n clb = plt.colorbar(\n heatmap, ticks=np.arange(0, len(classes)), shrink=0.9, aspect=75, drawedges=True\n )\n\n # Sets colour bar ticks to class labels.\n clb.ax.set_yticklabels(classes, fontsize=11)\n\n # Bodge to get a figure title by using the colour bar title.\n clb.ax.set_title(f\"{name}\\nLand Cover\", loc=\"left\", fontsize=15)\n\n # Set axis labels.\n ax1.set_xlabel(\"(x) - 
Pixel Position\", fontsize=14)\n ax1.set_ylabel(\"(y) - Pixel Position\", fontsize=14)\n ax2.set_ylabel(\"Latitude\", fontsize=14, rotation=270, labelpad=12)\n ax2.set_title(\"Longitude\") # Bodge\n\n # Manual trial and error fig size which fixes aspect ratio issue.\n fig.set_figheight(figdim[0])\n fig.set_figwidth(figdim[1])\n\n # Display figure.\n if show:\n plt.show(block=False)\n\n # Path and file name of figure.\n fn = Path(f\"{path}/{name}_RGBHM.png\")\n\n # If true, save file to fn.\n if save:\n # Checks if file already exists. Deletes if true.\n utils.exist_delete_check(fn)\n\n # Save figure to fn.\n fig.savefig(fn)\n\n # Close figure.\n plt.close()\n\n return fn\n\n\ndef make_gif(\n dates: Sequence[str],\n images: NDArray[Shape[\"*, *, *, 3\"], Any], # noqa: F722\n masks: NDArray[Shape[\"*, *, *\"], Any], # noqa: F722\n bounds: BoundingBox,\n src_crs: CRS,\n classes: Union[List[str], Tuple[str, ...]],\n gif_name: str,\n path: Union[str, Path],\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n fps: float = 1.0,\n new_crs: Optional[CRS] = WGS84,\n alpha: float = 0.5,\n figdim: Tuple[Union[int, float], Union[int, float]] = (8.02, 10.32),\n) -> None:\n \"\"\"Wrapper to :func:`labelled_rgb_image` to make a GIF for a patch out of scenes.\n\n Args:\n dates (~typing.Sequence[str]): Dates of scenes to be used as the frames in the GIF.\n images (~numpy.ndarray[~typing.Any]): All the frames of imagery to make the GIF from.\n Leading dimension must be the same length as ``dates`` and ``masks``.\n masks (~numpy.ndarray[~typing.Any]): The masks for each frame of the GIF.\n Leading dimension must be the same length as ``dates`` and ``image``.\n bounds (~torchgeo.datasets.utils.BoundingBox): The bounding box (in the ``src_crs`` CRS) of the\n :term:`patch` the ``GIF`` will be of.\n src_crs (~rasterio.crs.CRS): Source co-ordinate reference system (CRS).\n classes (list[str]): List of all possible class labels.\n gif_name (str): Path to and name of GIF to be made.\n path (~pathlib.Path | str]): Path to where to save frames of the ``GIF``.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n fps (float): Optional; Frames per second of ``GIF``.\n new_crs (~rasterio.crs.CRS): Optional; The co-ordinate reference system (CRS) to transform to.\n alpha (float): Optional; Fraction determining alpha blending of label mask.\n figdim (tuple[int | float, int | float]): Optional; Figure (height, width) in inches.\n\n Returns:\n None\n \"\"\"\n # Changes to `imagio` now mean we need the duration of the GIF and not the `fps`.\n duration = len(dates) / fps\n\n # Initialise progress bar.\n with alive_bar(len(dates), bar=\"blocks\") as bar:\n # List to hold filenames and paths of images created.\n frames = []\n for i in range(len(dates)):\n # Update progress bar with current scene.\n bar.text(\"SCENE ON %s\" % dates[i])\n\n # Create a frame of the GIF for a scene of the patch.\n frame = labelled_rgb_image(\n images[i],\n masks[i],\n bounds,\n src_crs,\n path,\n name=f\"{i}\",\n classes=classes,\n cmap_style=cmap_style,\n new_crs=new_crs,\n alpha=alpha,\n save=True,\n show=False,\n figdim=figdim,\n )\n\n # Read in frame just created and add to list of frames.\n frames.append(imageio.imread(frame))\n\n # Update bar with step completion.\n bar()\n\n # Checks GIF doesn't already exist. 
Deletes if it does.\n utils.exist_delete_check(gif_name)\n\n # Create a 'unknown' bar to 'spin' while the GIF is created.\n with alive_bar(unknown=\"waves\") as bar:\n # Add current operation to spinner bar.\n bar.text(\"MAKING PATCH GIF\")\n\n # Create GIF.\n imageio.mimwrite(gif_name, frames, format=\".gif\", duration=duration) # type: ignore\n\n\ndef prediction_plot(\n sample: Dict[str, Any],\n sample_id: str,\n classes: Dict[int, str],\n src_crs: CRS,\n new_crs: CRS = WGS84,\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n exp_id: Optional[str] = None,\n fig_dim: Optional[Tuple[Union[int, float], Union[int, float]]] = None,\n block_size: int = 32,\n show: bool = True,\n save: bool = True,\n fn_prefix: Optional[Union[str, Path]] = None,\n) -> None:\n \"\"\"\n Produces a figure containing subplots of the predicted label mask, the ground truth label mask\n and a reference RGB image of the same patch.\n\n Args:\n sample (dict[str, ~typing.Any]): Dictionary holding the ``\"image\"``, ground truth (``\"mask\"``)\n and predicted (``\"pred\"``) masks and the bounding box for this sample.\n sample_id (str): ID for the sample.\n classes (dict[int, str]): Dictionary mapping class labels to class names.\n src_crs (~rasterio.crs.CRS): Existing co-ordinate system of the image.\n new_crs(~rasterio.crs.CRS): Optional; Co-ordinate system to convert image to and use for labelling.\n exp_id (str): Optional; Unique ID for the experiment run that predictions and labels come from.\n block_size (int): Optional; Size of block image sub-division in pixels.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n show (bool): Optional; Show the figure when plotted.\n save (bool): Optional; Save the figure to file to ``fn_prefix``.\n fig_dim (tuple[float, float]): Optional; Figure (height, width) in inches.\n fn_prefix (str | ~pathlib.Path): Optional; Common filename prefix (including path to file) for all plots of\n this type from this experiment. 
Appended with the sample ID to give the filename to save the plot to.\n\n Returns:\n None\n \"\"\"\n # Stacks together the R, G, & B bands to form an array of the RGB image.\n rgb_image = sample[\"image\"]\n z = sample[\"pred\"]\n y = sample[\"mask\"]\n bounds = sample[\"bounds\"]\n\n extent, lat_extent, lon_extent = dec_extent_to_deg(\n y.shape, bounds, src_crs, new_crs=new_crs, spacing=block_size\n )\n\n centre = utils.transform_coordinates(\n *utils.get_centre_loc(bounds), src_crs=src_crs, new_crs=new_crs\n )\n\n # Initialises a figure.\n fig = plt.figure(figsize=fig_dim)\n\n gs = GridSpec(nrows=2, ncols=2, figure=fig)\n\n axes: NDArray[Shape[\"3\"], Any] = np.array(\n [\n fig.add_subplot(gs[0, 0]),\n fig.add_subplot(gs[0, 1]),\n fig.add_subplot(gs[1, :]),\n ]\n )\n\n cmap = get_mlp_cmap(cmap_style, len(classes))\n\n # Plots heatmap onto figure.\n z_heatmap = axes[0].imshow(z, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5)\n _ = axes[1].imshow(y, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5)\n\n # Create RGB image.\n axes[2].imshow(rgb_image, extent=extent)\n\n # Sets tick intervals to standard 32x32 block size.\n axes[0].set_xticks(np.arange(0, z.shape[0] + 1, block_size))\n axes[0].set_yticks(np.arange(0, z.shape[1] + 1, block_size))\n\n axes[1].set_xticks(np.arange(0, y.shape[0] + 1, block_size))\n axes[1].set_yticks(np.arange(0, y.shape[1] + 1, block_size))\n\n axes[2].set_xticks(np.arange(0, rgb_image.shape[0] + 1, block_size))\n axes[2].set_yticks(np.arange(0, rgb_image.shape[1] + 1, block_size))\n\n # Add grid overlay.\n axes[0].grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n axes[1].grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n axes[2].grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n # Converts the decimal lat-lon into degrees, minutes, seconds to label the axis.\n lat_labels = utils.dec2deg(lat_extent, axis=\"lat\")\n lon_labels = utils.dec2deg(lon_extent, axis=\"lon\")\n\n # Sets the secondary axis tick labels.\n axes[2].set_xticklabels(lon_labels, fontsize=9, rotation=30)\n axes[2].set_yticklabels(lat_labels, fontsize=9)\n\n # Plots colour bar onto figure.\n clb = fig.colorbar(\n z_heatmap,\n ax=axes.ravel().tolist(),\n location=\"top\",\n ticks=np.arange(0, len(classes)),\n aspect=75,\n drawedges=True,\n )\n\n # Sets colour bar ticks to class labels.\n clb.ax.set_xticklabels(classes.values(), fontsize=9)\n\n # Set figure title and subplot titles.\n fig.suptitle(\n f\"{sample_id}: {utils.lat_lon_to_loc(lat=str(centre[1]), lon=str(centre[0]))}\",\n fontsize=15,\n )\n axes[0].set_title(\"Predicted\", fontsize=13)\n axes[1].set_title(\"Ground Truth\", fontsize=13)\n axes[2].set_title(\"Reference Imagery\", fontsize=13)\n\n # Set axis labels.\n axes[0].set_xlabel(\"(x) - Pixel Position\", fontsize=10)\n axes[0].set_ylabel(\"(y) - Pixel Position\", fontsize=10)\n axes[1].set_xlabel(\"(x) - Pixel Position\", fontsize=10)\n axes[1].set_ylabel(\"(y) - Pixel Position\", fontsize=10)\n axes[2].set_xlabel(\"Longitude\", fontsize=10)\n axes[2].set_ylabel(\"Latitude\", fontsize=10)\n\n # Display figure.\n if show:\n plt.show(block=False)\n\n if fn_prefix is None:\n path = universal_path(CONFIG[\"dir\"][\"results\"])\n fn_prefix = str(path / f\"{exp_id}_{utils.timestamp_now()}_Mask\")\n\n # Path and file name of figure.\n fn = f\"{fn_prefix}_{sample_id}.png\"\n\n # If true, save file to fn.\n if save:\n # Checks if file already exists. 
Deletes if true.\n utils.exist_delete_check(fn)\n\n # Save figure to fn.\n fig.savefig(fn)\n\n # Close figure.\n plt.close()\n\n\ndef seg_plot(\n z: Union[List[int], NDArray[Any, Any]],\n y: Union[List[int], NDArray[Any, Any]],\n ids: List[str],\n bounds: Union[Sequence[Any], NDArray[Any, Any]],\n mode: str,\n classes: Dict[int, str],\n colours: Dict[int, str],\n fn_prefix: Union[str, Path],\n frac: float = 0.05,\n fig_dim: Optional[Tuple[Union[int, float], Union[int, float]]] = (9.3, 10.5),\n) -> None:\n \"\"\"Custom function for pre-processing the outputs from image segmentation testing for data visualisation.\n\n Args:\n z (list[float]): Predicted segmentation masks by the network.\n y (list[float]): Corresponding ground truth masks.\n ids (list[str]): Corresponding patch IDs for the test data supplied to the network.\n bounds (list[~torchgeo.datasets.utils.BoundingBox] | ~numpy.ndarray[~torchgeo.datasets.utils.BoundingBox]):\n Array of objects describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n mode (str): Mode samples are from. Must be ``'train'``, ``'val'`` or ``'test'``.\n classes (dict[int, str]): Dictionary mapping class labels to class names.\n colours (dict[int, str]): Dictionary mapping class labels to colours.\n fn_prefix (str | ~pathlib.Path): Common filename prefix (including path to file) for all plots of this type\n from this experiment to use.\n frac (float): Optional; Fraction of patch samples to plot.\n fig_dim (tuple[float, float]): Optional; Figure (height, width) in inches.\n\n Returns:\n None\n \"\"\"\n # TODO: This is a very naughty way of avoiding a circular import.\n # Need to reorganise package to avoid need for this.\n from minerva.datasets import make_dataset\n\n if not isinstance(z, np.ndarray):\n z = np.array(z)\n\n if not isinstance(y, np.ndarray):\n y = np.array(y)\n\n z = np.reshape(z, (z.shape[0] * z.shape[1], z.shape[2], z.shape[3]))\n y = np.reshape(y, (y.shape[0] * y.shape[1], y.shape[2], y.shape[3]))\n flat_ids: NDArray[Any, Any] = np.array(ids).flatten()\n\n print(\"\\nRE-CONSTRUCTING DATASET\")\n dataset, _ = make_dataset(CONFIG[\"dir\"][\"data\"], CONFIG[\"dataset_params\"][mode])\n\n # Create a new projection system in lat-lon.\n crs = dataset.crs\n\n print(\"\\nPRODUCING PREDICTED MASKS\")\n\n # Limits number of masks to produce to a fractional number of total and no more than `_MAX_SAMPLES`.\n n_samples = int(frac * len(flat_ids))\n if n_samples > _MAX_SAMPLES:\n n_samples = _MAX_SAMPLES\n\n # Initialises a progress bar for the epoch.\n with alive_bar(n_samples, bar=\"blocks\") as bar:\n # Plots the predicted versus ground truth labels for all test patches supplied.\n for i in random.sample(range(len(flat_ids)), n_samples):\n image = stack_rgb(dataset[bounds[i]][\"image\"].numpy())\n sample = {\"image\": image, \"pred\": z[i], \"mask\": y[i], \"bounds\": bounds[i]}\n\n prediction_plot(\n sample,\n flat_ids[i],\n classes=classes,\n src_crs=crs,\n exp_id=CONFIG[\"model_name\"],\n show=False,\n fn_prefix=fn_prefix,\n fig_dim=fig_dim,\n cmap_style=ListedColormap(colours.values(), N=len(colours)), # type: ignore\n )\n\n bar()\n\n\ndef plot_subpopulations(\n class_dist: List[Tuple[int, int]],\n class_names: Dict[int, str],\n cmap_dict: Dict[int, str],\n filename: Optional[Union[str, Path]] = None,\n save: bool = True,\n show: bool = False,\n) -> None:\n \"\"\"Creates a pie chart of the distribution of the classes within the data.\n\n Args:\n class_dist (list[tuple[int, int]]): Modal 
distribution of classes in the dataset provided.\n class_names (dict[int, str]): Optional; Dictionary mapping class labels to class names.\n cmap_dict (dict[int, str]): Optional; Dictionary mapping class labels to class colours.\n filename (str): Optional; Name of file to save plot to.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n\n Returns:\n None\n \"\"\"\n # List to hold the name and percentage distribution of each class in the data as str.\n class_data = []\n\n # List to hold the total counts of each class.\n counts = []\n\n # List to hold colours of classes in the correct order.\n colours = []\n\n # Finds total number of samples to normalise data.\n n_samples = 0\n for mode in class_dist:\n n_samples += mode[1]\n\n # For each class, find the percentage of data that is that class and the total counts for that class.\n for label in class_dist:\n # Sets percentage label to <0.01% for classes matching that equality.\n if (label[1] * 100.0 / n_samples) > 0.01:\n class_data.append(\n \"{} \\n{:.2f}%\".format(\n class_names[label[0]], (label[1] * 100.0 / n_samples)\n )\n )\n else:\n class_data.append(\"{} \\n<0.01%\".format(class_names[label[0]]))\n counts.append(label[1])\n colours.append(cmap_dict[label[0]])\n\n # Locks figure size.\n plt.figure(figsize=(6, 5))\n\n # Plot a pie chart of the data distribution amongst the classes.\n patches, _ = plt.pie(\n counts, colors=colours, explode=[i * 0.05 for i in range(len(class_data))]\n )\n\n # Adds legend.\n plt.legend(\n patches, class_data, loc=\"center left\", bbox_to_anchor=(1, 0.5), frameon=False\n )\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n plt.close()\n\n\ndef plot_history(\n metrics: Dict[str, Any],\n filename: Optional[Union[str, Path]] = None,\n save: bool = True,\n show: bool = False,\n) -> None:\n \"\"\"Plots model history based on metrics supplied.\n\n Args:\n metrics (dict[str, ~typing.Any]): Dictionary containing the names and results of the metrics\n by which model was assessed.\n filename (str): Optional; Name of file to save plot to.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n\n Returns:\n None\n \"\"\"\n # Initialise figure.\n ax = plt.figure().gca()\n\n # Plots each metric in metrics, appending their artist handles.\n handles = []\n labels = []\n for key in metrics:\n # Checks that the length of x matches y and is greater than 1 so can be plotted.\n if len(metrics[key][\"x\"]) == len(metrics[key][\"y\"]) >= 1.0:\n # Plot metric.\n handles.append(ax.plot(metrics[key][\"x\"], metrics[key][\"y\"])[0])\n labels.append(key)\n\n # Creates legend from plot artist handles and names of metrics.\n ax.legend(handles=handles, labels=labels)\n\n # Forces x-axis ticks to be integers.\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\n # Adds a grid overlay with green dashed lines.\n ax.grid(color=\"green\", linestyle=\"--\", linewidth=0.5) # For some funky gridlines\n\n # Adds axis labels.\n ax.set_xlabel(\"Epoch\")\n ax.set_ylabel(\"Loss/Accuracy\")\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n plt.close()\n\n\ndef make_confusion_matrix(\n pred: Union[List[int], NDArray[Any, Int]],\n labels: Union[List[int], NDArray[Any, Int]],\n classes: Dict[int, str],\n filename: Optional[Union[str, Path]] = None,\n cmap_style: str = \"Blues\",\n show: bool = True,\n save: bool = False,\n) -> None:\n 
\"\"\"Creates a heat-map of the confusion matrix of the given model.\n\n Args:\n pred(list[int]): Predictions made by model on test images.\n labels (list[int]): Accompanying ground truth labels for testing images.\n classes (dict[int, str]): Dictionary mapping class labels to class names.\n filename (str): Optional; Name of file to save plot to.\n cmap_style (str): Colourmap style to use in the confusion matrix.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n\n Returns:\n None\n \"\"\"\n _pred, _labels, new_classes = utils.check_test_empty(pred, labels, classes)\n\n # Extract class names from dict in numeric order to ensure labels match matrix.\n class_names = [new_classes[key] for key in range(len(new_classes.keys()))]\n\n if DATA_CONFIG is not None:\n figsize = DATA_CONFIG[\"fig_sizes\"][\"CM\"]\n else: # pragma: no cover\n figsize = None\n\n # Creates the figure to plot onto.\n ax = plt.figure(figsize=figsize).gca()\n\n # Get a matplotlib colourmap based on the style specified to use for the confusion matrix.\n cmap = get_mlp_cmap(cmap_style)\n\n # Creates, plots and normalises the confusion matrix.\n cm = ConfusionMatrixDisplay.from_predictions(\n _labels,\n _pred,\n labels=list(new_classes.keys()),\n normalize=\"all\",\n display_labels=class_names,\n cmap=cmap,\n ax=ax,\n )\n\n # Normalises the colourbar to between [0, 1] for consistent clarity.\n cm.ax_.get_images()[0].set_clim(0, 1)\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n plt.close()\n\n\ndef make_roc_curves(\n probs: ArrayLike,\n labels: Union[Sequence[int], NDArray[Any, Int]],\n class_names: Dict[int, str],\n colours: Dict[int, str],\n micro: bool = True,\n macro: bool = True,\n filename: Optional[Union[str, Path]] = None,\n show: bool = False,\n save: bool = True,\n) -> None:\n \"\"\"Plots ROC curves for each class, the micro and macro average ROC curves and accompanying AUCs.\n\n Adapted from Scikit-learn's example at:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n\n Args:\n probs (list | ~numpy.ndarray[int]): Array of probabilistic predicted classes from model where each sample\n should have a list of the predicted probability for each class.\n labels (list | ~numpy.ndarray[int]): List of corresponding ground truth labels.\n class_names (dict[int, str]): Dictionary mapping class labels to class names.\n colours (dict[int, str]): Dictionary mapping class labels to colours.\n micro (bool): Optional; Whether to compute and plot the micro average ROC curves.\n macro (bool): Optional; Whether to compute and plot the macro average ROC curves.\n filename (str | ~pathlib.Path): Optional; Name of file to save plot to.\n save (bool): Optional; Whether to save the plots to file.\n show (bool): Optional; Whether to show the plots.\n\n Returns:\n None\n \"\"\"\n # Gets the class labels as a list from the class_names dict.\n class_labels = [key for key in class_names.keys()]\n\n # Reshapes the probabilities to be (n_samples, n_classes).\n probs = np.reshape(probs, (len(labels), len(class_labels)))\n\n # Computes all class, micro and macro average ROC curves and AUCs.\n fpr, tpr, roc_auc = utils.compute_roc_curves(\n probs, labels, class_labels, micro=micro, macro=macro\n )\n\n # Plot all ROC curves\n print(\"\\nPlotting ROC Curves\")\n plt.figure()\n\n if micro:\n # Plot micro average ROC curves.\n plt.plot(\n fpr[\"micro\"],\n tpr[\"micro\"],\n label=\"Micro-average (AUC = 
{:.2f})\".format(roc_auc[\"micro\"]),\n color=\"deeppink\",\n linestyle=\"dotted\",\n )\n\n if macro:\n # Plot macro average ROC curves.\n plt.plot(\n fpr[\"macro\"],\n tpr[\"macro\"],\n label=\"Macro-average (AUC = {:.2f})\".format(roc_auc[\"macro\"]),\n color=\"navy\",\n linestyle=\"dotted\",\n )\n\n # Plot all class ROC curves.\n for key in class_labels:\n try:\n plt.plot(\n fpr[key],\n tpr[key],\n color=colours[key],\n label=f\"{class_names[key]} \" + \"(AUC = {:.2f})\".format(roc_auc[key]),\n )\n except KeyError:\n pass\n\n # Plot random classifier diagonal.\n plt.plot([0, 1], [0, 1], \"k--\")\n\n # Set limits.\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n\n # Set axis labels.\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n\n # Position legend in lower right corner of figure where no classifiers should exist.\n plt.legend(loc=\"lower right\")\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n print(\"ROC Curves plot SAVED\")\n plt.close()\n\n\ndef plot_embedding(\n embeddings: Any,\n bounds: Union[Sequence[BoundingBox], NDArray[Any, Any]],\n mode: str,\n title: Optional[str] = None,\n show: bool = False,\n save: bool = True,\n filename: Optional[Union[Path, str]] = None,\n) -> None:\n \"\"\"Using TSNE Clustering, visualises the embeddings from a model.\n\n Args:\n embeddings (~typing.Any): Embeddings from a model.\n bounds (~typing.Sequence[~torchgeo.datasets.utils.BoundingBox] | ~numpy.ndarray[~torchgeo.datasets.utils.BoundingBox]): # noqa: E501\n Array of objects describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n mode (str): Mode samples are from. Must be ``'train'``, ``'val'`` or ``'test'``.\n title (str): Optional; Title of plot.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n filename (str): Optional; Name of file to save plot to.\n\n Returns:\n None\n \"\"\"\n\n x = utils.tsne_cluster(embeddings)\n\n # TODO: This is a very naughty way of avoiding a circular import.\n # Need to reorganise package to avoid need for this.\n from minerva.datasets import make_dataset\n\n print(\"\\nRE-CONSTRUCTING DATASET\")\n dataset, _ = make_dataset(CONFIG[\"dir\"][\"data\"], CONFIG[\"dataset_params\"][mode])\n\n images = []\n targets = []\n\n # Initialises a progress bar for the epoch.\n with alive_bar(len(x), bar=\"blocks\") as bar:\n # Plots the predicted versus ground truth labels for all test patches supplied.\n for i in range(len(x)):\n sample = dataset[bounds[i]]\n images.append(stack_rgb(sample[\"image\"].numpy()))\n targets.append(\n [\n int(stats.mode(mask.flatten(), keepdims=False).mode)\n for mask in sample[\"mask\"].numpy()\n ]\n )\n\n bar()\n\n x_min, x_max = np.min(x, 0), np.max(x, 0)\n x = (x - x_min) / (x_max - x_min)\n\n plt.figure(figsize=(10, 10))\n ax = plt.subplot(111)\n\n for i in range(len(x)):\n plt.text(\n x[i, 0],\n x[i, 1],\n str(targets[i]),\n color=plt.cm.Set1(targets[i][0] / 10.0), # type: ignore\n fontdict={\"weight\": \"bold\", \"size\": 9},\n )\n\n if hasattr(offsetbox, \"AnnotationBbox\"):\n # only print thumbnails with matplotlib > 1.0\n shown_images: NDArray[Any, Any] = np.array([[1.0, 1.0]]) # just something big\n\n for i in range(len(images)):\n dist = np.sum((x[i] - shown_images) ** 2, 1)\n if np.min(dist) < 4e-3:\n # don’t show points that are too close\n continue # pragma: no cover\n\n shown_images = np.r_[shown_images, [x[i]]]\n imagebox = 
offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(images[i], cmap=plt.cm.gray_r), x[i] # type: ignore\n )\n\n ax.add_artist(imagebox)\n\n plt.xticks([]), plt.yticks([]) # type: ignore\n\n if title is not None:\n plt.title(title)\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n if filename is None: # pragma: no cover\n filename = \"tsne_cluster_vis.png\"\n os.makedirs(Path(filename).parent, exist_ok=True)\n plt.savefig(filename)\n print(\"TSNE cluster visualisation SAVED\")\n plt.close()\n\n\ndef format_plot_names(\n model_name: str, timestamp: str, path: Union[Sequence[str], str, Path]\n) -> Dict[str, Path]:\n \"\"\"Creates unique filenames of plots in a standardised format.\n\n Args:\n model_name (str): Name of model. e.g. ``\"MLP-MkVI\"``.\n timestamp (str): Time and date to be used to identify experiment.\n path (list[str] | str | ~pathlib.Path): Path to the directory for storing plots as a :class:`list`\n of strings for each level.\n\n Returns:\n filenames (dict[str, ~pathlib.Path]): Formatted filenames for plots.\n \"\"\"\n\n def standard_format(plot_type: str, *sub_dir) -> str:\n \"\"\"Creates a unique filename for a plot in a standardised format.\n\n Args:\n plot_type (str): Plot type to use in filename.\n sub_dir (str): Additional subdirectories to add to path to filename.\n\n Returns:\n str: String of path to filename of the form ``\"{model_name}_{timestamp}_{plot_type}.{file_ext}\"``\n \"\"\"\n filename = f\"{model_name}_{timestamp}_{plot_type}\"\n return str(universal_path(path) / universal_path(sub_dir) / filename)\n\n filenames = {\n \"History\": Path(standard_format(\"MH\") + \".png\"),\n \"Pred\": Path(standard_format(\"TP\") + \".png\"),\n \"CM\": Path(standard_format(\"CM\") + \".png\"),\n \"ROC\": Path(standard_format(\"ROC\") + \".png\"),\n \"Mask\": Path(standard_format(\"Mask\", \"Masks\")),\n \"PvT\": Path(standard_format(\"PvT\", \"PvTs\")),\n \"TSNE\": Path(standard_format(\"TSNE\") + \".png\"),\n }\n\n return filenames\n\n\ndef plot_results(\n plots: Dict[str, bool],\n z: Optional[Union[List[int], NDArray[Any, Int]]] = None,\n y: Optional[Union[List[int], NDArray[Any, Int]]] = None,\n metrics: Optional[Dict[str, Any]] = None,\n ids: Optional[List[str]] = None,\n mode: str = \"test\",\n bounds: Optional[NDArray[Any, Any]] = None,\n probs: Optional[Union[List[float], NDArray[Any, Float]]] = None,\n embeddings: Optional[NDArray[Any, Any]] = None,\n class_names: Optional[Dict[int, str]] = None,\n colours: Optional[Dict[int, str]] = None,\n save: bool = True,\n show: bool = False,\n model_name: Optional[str] = None,\n timestamp: Optional[str] = None,\n results_dir: Optional[Union[Sequence[str], str, Path]] = None,\n) -> None:\n \"\"\"Orchestrates the creation of various plots from the results of a model fitting.\n\n Args:\n plots (dict[str, bool]): Dictionary defining which plots to make.\n z (list[list[int]] | ~numpy.ndarray[~numpy.ndarray[int]]): List of predicted label masks.\n y (list[list[int]] | ~numpy.ndarray[~numpy.ndarray[int]]): List of corresponding ground truth label masks.\n metrics (dict[str, ~typing.Any]): Optional; Dictionary containing a log of various metrics used to assess\n the performance of a model.\n ids (list[str]): Optional; List of IDs defining the origin of samples to the model.\n May be either patch IDs or scene tags.\n mode (str): Optional; Mode samples are from. 
Must be ``'train'``, ``'val'`` or ``'test'``.\n bounds (~numpy.ndarray[~torchgeo.datasets.utils.BoundingBox]): Optional; Array of objects describing\n a geospatial bounding box for each sample.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n probs (list[float] | ~numpy.ndarray[float]): Optional; Array of probabilistic predicted classes\n from model where each sample should have a list of the predicted probability for each class.\n embeddings (~numpy.ndarray[~typing.Any]): Embeddings from the model to visualise with TSNE clustering.\n class_names (dict[int, str]): Optional; Dictionary mapping class labels to class names.\n colours (dict[int, str]): Optional; Dictionary mapping class labels to colours.\n save (bool): Optional; Save the plots to file.\n show (bool): Optional; Show the plots.\n model_name (str): Optional; Name of model. e.g. MLP-MkVI.\n timestamp (str): Optional; Time and date to be used to identify experiment.\n If not specified, the current date-time is used.\n results_dir (list[str] | str | ~pathlib.Path): Optional; Path to the directory for storing plots.\n\n Notes:\n Plots made for each sample, such as PvT or Mask plots, use ``save=True`` and ``show=False``\n regardless of input.\n\n Returns:\n None\n \"\"\"\n if not show:\n # Ensures that there is no attempt to display figures in case no display is present.\n try:\n mlp.use(\"agg\")\n except ImportError: # pragma: no cover\n pass\n\n flat_z = None\n flat_y = None\n\n if z is not None:\n flat_z = utils.batch_flatten(z)\n\n if y is not None:\n flat_y = utils.batch_flatten(y)\n\n if timestamp is None:\n timestamp = utils.timestamp_now(fmt=\"%d-%m-%Y_%H%M\")\n\n if model_name is None:\n model_name = CONFIG[\"model_name\"]\n assert model_name is not None\n\n if results_dir is None:\n results_dir = CONFIG[\"dir\"][\"results\"]\n assert isinstance(results_dir, (Sequence, str, Path))\n\n filenames = format_plot_names(model_name, timestamp, results_dir)\n\n try:\n os.mkdir(universal_path(results_dir))\n except FileExistsError as err:\n print(err)\n\n if plots.get(\"History\", False):\n assert metrics is not None\n\n print(\"\\nPLOTTING MODEL HISTORY\")\n plot_history(metrics, filename=filenames[\"History\"], save=save, show=show)\n\n if plots.get(\"CM\", False):\n assert class_names is not None\n assert flat_y is not None\n assert flat_z is not None\n\n print(\"\\nPLOTTING CONFUSION MATRIX\")\n make_confusion_matrix(\n labels=flat_y,\n pred=flat_z,\n classes=class_names,\n filename=filenames[\"CM\"],\n save=save,\n show=show,\n )\n\n if plots.get(\"Pred\", False):\n assert class_names is not None\n assert colours is not None\n assert flat_z is not None\n\n print(\"\\nPLOTTING CLASS DISTRIBUTION OF PREDICTIONS\")\n plot_subpopulations(\n utils.find_modes(flat_z),\n class_names=class_names,\n cmap_dict=colours,\n filename=filenames[\"Pred\"],\n save=save,\n show=show,\n )\n\n if plots.get(\"ROC\", False):\n assert class_names is not None\n assert colours is not None\n assert probs is not None\n assert flat_y is not None\n\n print(\"\\nPLOTTING ROC CURVES\")\n make_roc_curves(\n probs,\n flat_y,\n class_names=class_names,\n colours=colours,\n filename=filenames[\"ROC\"],\n micro=plots[\"micro\"],\n macro=plots[\"macro\"],\n save=save,\n show=show,\n )\n\n if plots.get(\"Mask\", False):\n assert class_names is not None\n assert colours is not None\n assert z is not None\n assert y is not None\n assert ids is not None\n assert bounds is not None\n assert mode is not None\n\n figsize = None\n if DATA_CONFIG is not 
None:\n figsize = DATA_CONFIG[\"fig_sizes\"][\"Mask\"]\n\n flat_bbox = utils.batch_flatten(bounds)\n os.makedirs(universal_path(results_dir) / \"Masks\", exist_ok=True)\n seg_plot(\n z,\n y,\n ids,\n flat_bbox,\n mode,\n fn_prefix=filenames[\"Mask\"],\n classes=class_names,\n colours=colours,\n fig_dim=figsize,\n )\n\n if plots.get(\"TSNE\", False):\n assert embeddings is not None\n assert bounds is not None\n assert mode is not None\n\n print(\"\\nPERFORMING TSNE CLUSTERING\")\n plot_embedding(\n embeddings,\n bounds,\n mode,\n show=show,\n save=save,\n filename=filenames[\"TSNE\"],\n )\n" }, "sourceLanguage": "Python" }, @@ -16612,9 +16965,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 18579, + "charLength": 51811, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle generic functionality for running :mod:`minerva` scripts.\n\nAttributes:\n GENERIC_PARSER (~argparse.ArgumentParser): A standard argparser with arguments for use in :mod:`minerva`.\n Can be used as the basis for a user defined extended argparser.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"GENERIC_PARSER\",\n \"WandbConnectionManager\",\n \"setup_wandb_run\",\n \"config_env_vars\",\n \"config_args\",\n \"distributed_run\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nimport signal\nimport subprocess\nfrom argparse import Namespace\nfrom typing import Any, Callable, Optional, Union\n\nimport requests\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom wandb.sdk.lib import RunDisabled\nfrom wandb.sdk.wandb_run import Run\n\nimport wandb\nfrom minerva.utils import CONFIG, MASTER_PARSER, utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# ---+ CLI +--------------------------------------------------------------+\nGENERIC_PARSER = argparse.ArgumentParser(parents=[MASTER_PARSER])\n\nGENERIC_PARSER.add_argument(\n \"-o\",\n 
\"--override\",\n dest=\"override\",\n action=\"store_true\",\n help=\"Override config arguments with the CLI arguments where they overlap.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--seed\",\n dest=\"seed\",\n type=int,\n default=42,\n help=\"Set seed number\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-name\",\n dest=\"model_name\",\n type=str,\n help=\"Name of model.\"\n + \" Sub-string before hyphen is taken as model class name.\"\n + \" Sub-string past hyphen can be used to differeniate between versions.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-type\",\n dest=\"model_type\",\n type=str,\n help=\"Type of model. Should be 'segmentation', 'scene_classifier', 'siamese' or 'mlp'\",\n choices=(\"segmentation\", \"ssl\", \"siamese\", \"scene_classifier\", \"mlp\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--max_epochs\",\n dest=\"max_epochs\",\n type=int,\n default=100,\n help=\"Maximum number of training epochs.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--batch-size\",\n dest=\"batch_size\",\n type=int,\n default=8,\n help=\"Number of samples in each batch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--lr\",\n dest=\"lr\",\n type=float,\n default=0.01,\n help=\"Learning rate of the optimiser.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--optim-func\",\n dest=\"optim_func\",\n type=str,\n default=\"SGD\",\n help=\"Name of the optimiser to use. Only works for ``torch`` losses\"\n + \"(or if ``module`` is specified in the ``optim_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--loss-func\",\n dest=\"loss_func\",\n type=str,\n default=\"CrossEntropyLoss\",\n help=\"Name of the loss function to use. Only works for ``torch`` losses\"\n + \"(or if ``module`` is specified in the ``loss_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--pre-train\",\n dest=\"pre_train\",\n action=\"store_true\",\n help=\"Sets experiment type to pre-train. Will save model to cache at end of training.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--fine-tune\",\n dest=\"fine_tune\",\n action=\"store_true\",\n help=\"Sets experiment type to fine-tune. Will load pre-trained backbone from file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--eval\",\n dest=\"eval\",\n action=\"store_true\",\n help=\"Sets experiment type to pre-train. Will save model to cache at end of training.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--balance\",\n dest=\"balance\",\n action=\"store_true\",\n help=\"Activates class balancing.\"\n + \" Depending on `model_type`, this will either be via sampling or weighting of the loss function.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--class-elim\",\n dest=\"elim\",\n action=\"store_true\",\n help=\"Eliminates classes that are specified in config but not present in the data.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--sample-pairs\",\n dest=\"sample_pairs\",\n action=\"store_true\",\n help=\"Use paired sampling. E.g. For Siamese models.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-model\",\n dest=\"save_model\",\n type=str,\n default=False,\n help=\"Whether to save the model at end of testing. Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically save the model to file.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not save the model and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--run-tensorboard\",\n dest=\"run_tensorboard\",\n type=str,\n default=False,\n help=\"Whether to run the Tensorboard logs at end of testing. 
Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically locate and run the logs on a local browser.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not run the logs and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-plots-no\",\n dest=\"save\",\n action=\"store_false\",\n help=\"Plots created will not be saved to file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--show-plots\",\n dest=\"show\",\n action=\"store_true\",\n help=\"Show plots created in a window.\"\n + \" Warning: Do not use with a terminal-less operation, e.g. SLURM.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--print-dist\",\n dest=\"p_dist\",\n action=\"store_true\",\n help=\"Print the distribution of classes within the data to `stdout`.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--plot-last-epoch\",\n dest=\"plot_last_epoch\",\n action=\"store_true\",\n help=\"Plot the results from the final validation epoch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log\",\n dest=\"wandb_log\",\n action=\"store_true\",\n help=\"Activate Weights and Biases logging.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--project_name\",\n dest=\"project\",\n type=str,\n help=\"Name of the Weights and Biases project this experiment belongs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-entity\",\n dest=\"entity\",\n type=str,\n help=\"The Weights and Biases entity to send runs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-dir\",\n dest=\"wandb_dir\",\n type=str,\n default=\"./wandb\",\n help=\"Where to store the Weights and Biases logs locally.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log-all\",\n dest=\"log_all\",\n action=\"store_true\",\n help=\"Will log each process on Weights and Biases. Otherwise, logging will be performed from the master process.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--knn-k\",\n dest=\"knn_k\",\n type=int,\n default=200,\n help=\"Top k most similar images used to predict the image for KNN validation.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--val-freq\",\n dest=\"val_freq\",\n type=int,\n default=5,\n help=\"Perform a validation epoch with KNN for every ``val_freq``\"\n + \" training epochs for SSL or Siamese models.\",\n)\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass WandbConnectionManager:\n \"\"\"Checks for a connection to :mod:`wandb`.
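A minimal usage sketch of the connection manager above, assuming this module is importable as `minerva.utils.runner` (the import path is an assumption, not confirmed by the diff):

```python
import os

from minerva.utils.runner import WandbConnectionManager  # assumed module path

# Inside the block, WANDB_MODE is "online" if wandb.ai was reachable when the
# manager was constructed, and "offline" otherwise; on exit it resets to "online".
with WandbConnectionManager():
    print(os.environ["WANDB_MODE"])
```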
If not, sets :mod:`wandb` to offline during context.\"\"\"\n\n def __init__(self) -> None:\n try:\n requests.head(\"http://www.wandb.ai/\", timeout=0.1)\n self._on = True\n except requests.ConnectionError:\n self._on = False\n\n def __enter__(self) -> None:\n if self._on:\n os.environ[\"WANDB_MODE\"] = \"online\"\n else:\n os.environ[\"WANDB_MODE\"] = \"offline\"\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.environ[\"WANDB_MODE\"] = \"online\"\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef _handle_sigusr1(signum, frame) -> None: # pragma: no cover\n subprocess.Popen( # nosec B602\n f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}',\n shell=True,\n )\n exit()\n\n\ndef _handle_sigterm(signum, frame) -> None: # pragma: no cover\n pass\n\n\ndef setup_wandb_run(gpu: int, args: Namespace) -> Optional[Union[Run, RunDisabled]]:\n \"\"\"Sets up a :mod:`wandb` logger for either every process, the master process or not if not logging.\n\n Note:\n ``args`` must contain these keys:\n\n * ``wandb_log`` (bool): Activate :mod:`wandb` logging.\n * | ``log_all`` (bool): :mod:`wandb` logging on every process if ``True``.\n | Only log on master process if ``False``.\n * ``entity`` (str): :mod:`wandb` entity where to send runs to.\n * ``project`` (str): Name of the :mod:`wandb` project this experiment belongs to.\n * ``world_size`` (int): Total number of processes across the experiment.\n\n Args:\n gpu (int): Local process (GPU) number.\n args (~argparse.Namespace): CLI arguments from :mod:`argparse`.\n\n Returns:\n ~wandb.sdk.wandb_run.Run | ~wandb.sdk.lib.RunDisabled | None: The :mod:`wandb` run object\n for this process or ``None`` if ``log_all=False`` and ``rank!=0``.\n \"\"\"\n run: Optional[Union[Run, RunDisabled]] = None\n if CONFIG.get(\"wandb_log\", False) or CONFIG.get(\"project\", None):\n try:\n if CONFIG.get(\"log_all\", False) and args.world_size > 1:\n run = wandb.init( # pragma: no cover\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n group=CONFIG.get(\"group\", \"DDP\"),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n else:\n if gpu == 0:\n run = wandb.init(\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n CONFIG[\"wandb_log\"] = True\n except wandb.UsageError: # type: ignore[attr-defined] # pragma: no cover\n print(\n \"wandb API Key has not been inited.\",\n \"\\nEither call wandb.login(key=[your_api_key]) or use `wandb login` in the shell.\",\n \"\\nOr if not using wandb, safely ignore this message.\",\n )\n else:\n print(\"Weights and Biases logging OFF\")\n\n return run\n\n\ndef config_env_vars(args: Namespace) -> Namespace:\n \"\"\"Finds SLURM environment variables (if they exist) and configures args accordingly.\n\n If SLURM variables are found in the environment variables, the arguments are configured for a SLURM job:\n\n * ``args.rank`` is set to the ``SLURM_NODEID * args.ngpus_per_node``.\n * ``args.world_size`` is set to ``SLURM_NNODES * args.ngpus_per_node``.\n * ``args.dist_url`` is set to ``tcp://{host_name}:58472``\n\n If SLURM variables are not detected, the arguments are configured for a single-node job:\n\n * ``args.rank=0``.\n * ``args.world_size=args.ngpus_per_node``.\n * ``args.dist_url = 
\"tcp://localhost:58472\"``.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_sized`` attributes.\n \"\"\"\n if \"SLURM_JOB_ID\" in os.environ: # pragma: no cover\n # Single-node and multi-node distributed training on SLURM cluster.\n # Requeue job on SLURM preemption.\n signal.signal(signal.SIGUSR1, _handle_sigusr1)\n signal.signal(signal.SIGTERM, _handle_sigterm)\n\n # Get SLURM variables.\n slurm_job_nodelist: Optional[str] = os.getenv(\"SLURM_JOB_NODELIST\")\n slurm_nodeid: Optional[str] = os.getenv(\"SLURM_NODEID\")\n slurm_nnodes: Optional[str] = os.getenv(\"SLURM_NNODES\")\n slurm_jobid: Optional[str] = os.getenv(\"SLURM_JOB_ID\")\n\n # Check that SLURM variables have been found.\n assert slurm_job_nodelist is not None\n assert slurm_nodeid is not None\n assert slurm_nnodes is not None\n assert slurm_jobid is not None\n\n # Find a common host name on all nodes.\n # Assume scontrol returns hosts in the same order on all nodes.\n cmd = \"scontrol show hostnames \" + slurm_job_nodelist\n stdout = subprocess.check_output(cmd.split())\n host_name = stdout.decode().splitlines()[0]\n args.rank = int(slurm_nodeid) * args.ngpus_per_node\n args.world_size = int(slurm_nnodes) * args.ngpus_per_node\n args.dist_url = f\"tcp://{host_name}:58472\"\n args.jobid = slurm_jobid\n\n else:\n # Single-node distributed training.\n args.rank = 0\n args.dist_url = \"tcp://localhost:58472\"\n args.world_size = args.ngpus_per_node\n args.jobid = None\n\n return args\n\n\ndef config_args(args: Namespace) -> Namespace:\n \"\"\"Prepare the arguments generated from the :mod:`argparse` CLI for the job run.\n\n * Finds and sets ``args.ngpus_per_node``;\n * updates the ``CONFIG`` with new arguments from the CLI;\n * sets the seeds from the seed found in ``CONFIG`` or from CLI;\n * uses :func:`config_env_vars` to determine the correct arguments for distributed computing jobs e.g. 
SLURM.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_size`` attributes.\n \"\"\"\n args.ngpus_per_node = torch.cuda.device_count()\n\n # Convert CLI arguments to dict.\n args_dict = vars(args)\n\n # Find which CLI arguments are not in the config.\n new_args = {key: args_dict[key] for key in args_dict if key not in CONFIG}\n\n # Updates the config with new arguments from the CLI.\n CONFIG.update(new_args)\n\n # Overrides the arguments from the config with those of the CLI where they overlap.\n # WARNING: This will include the use of the default CLI arguments.\n if args_dict.get(\"override\"): # pragma: no cover\n updated_args = {\n key: args_dict[key]\n for key in args_dict\n if args_dict[key] != CONFIG[key] and args_dict[key] is not None\n }\n CONFIG.update(updated_args)\n\n # Get seed from config.\n seed = CONFIG.get(\"seed\", 42)\n\n # Set torch, numpy and inbuilt seeds for reproducibility.\n utils.set_seeds(seed)\n\n return config_env_vars(args)\n\n\ndef _run_preamble(\n gpu: int, run: Callable[[int, Namespace], Any], args: Namespace\n) -> None: # pragma: no cover\n # Calculates the global rank of this process.\n args.rank += gpu\n\n # Sets up the `wandb` run for this process.\n args.wandb_run = setup_wandb_run(gpu, args)\n\n if args.world_size > 1:\n dist.init_process_group( # type: ignore[attr-defined]\n backend=\"gloo\",\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n print(f\"INITIALISED PROCESS ON {args.rank}\")\n\n if torch.cuda.is_available():\n torch.cuda.set_device(gpu)\n torch.backends.cudnn.benchmark = True # type: ignore\n\n # Start this process run.\n run(gpu, args)\n\n\ndef distributed_run(run: Callable[[int, Namespace], Any], args: Namespace) -> None:\n \"\"\"Runs the supplied function and arguments with distributed computing according to arguments.\n\n :func:`_run_preamble` adds some additional commands to initialise the process group for each run\n and allocate the GPU device number to use before running the supplied function.\n\n Note:\n ``args`` must contain the attributes ``rank``, ``world_size`` and ``dist_url``.
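A sketch of the intended end-to-end flow, assuming a user-defined `main(gpu, args)` entry point and the `minerva.utils.runner` import path (both hypothetical names for illustration):

```python
from argparse import Namespace

from minerva.utils.runner import (  # assumed module path
    GENERIC_PARSER,
    config_args,
    distributed_run,
)


def main(gpu: int, args: Namespace) -> None:
    # Hypothetical per-process entry point: `gpu` is the local device index;
    # `args` now carries rank, world_size, dist_url and the wandb run.
    print(f"Process {args.rank} of {args.world_size} on GPU {gpu}")


if __name__ == "__main__":
    args = config_args(GENERIC_PARSER.parse_args())
    distributed_run(main, args)
```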
These can be\n configured using :func:`config_env_vars` or :func:`config_args`.\n\n Args:\n run (~typing.Callable[[int, ~argparse.Namespace], ~typing.Any]): Function to run with distributed computing.\n args (~argparse.Namespace): Arguments for the run and to specify the variables for distributed computing.\n \"\"\"\n if args.world_size <= 1:\n # Setups up the `wandb` run.\n args.wandb_run = setup_wandb_run(0, args)\n\n # Run the experiment.\n run(0, args)\n\n else: # pragma: no cover\n try:\n mp.spawn(_run_preamble, (run, args), args.ngpus_per_node) # type: ignore[attr-defined]\n except KeyboardInterrupt:\n dist.destroy_process_group() # type: ignore[attr-defined]\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n# TODO: Reduce boilerplate.\n#\n\"\"\"Module to visualise .tiff images, label masks and results from the fitting of neural networks for remote sensing.\n\nAttributes:\n DATA_CONFIG (dict): Config defining the properties of the data used in the experiment.\n IMAGERY_CONFIG (dict): Config defining the properties of the imagery used in the experiment.\n DATA_DIR (list[str] | str): Path to directory holding dataset.\n BAND_IDS (dict): Band IDs and position in sample image.\n MAX_PIXEL_VALUE (int): Maximum pixel value (e.g. 
255 for 8-bit integer).\n WGS84 (~rasterio.crs.CRS): WGS84 co-ordinate reference system acting as a\n default :class:`~rasterio.crs.CRS` for transformations.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"DATA_CONFIG\",\n \"IMAGERY_CONFIG\",\n \"DATA_DIR\",\n \"BAND_IDS\",\n \"MAX_PIXEL_VALUE\",\n \"WGS84\",\n \"de_interlace\",\n \"dec_extent_to_deg\",\n \"get_mlp_cmap\",\n \"discrete_heatmap\",\n \"stack_rgb\",\n \"make_rgb_image\",\n \"labelled_rgb_image\",\n \"make_gif\",\n \"prediction_plot\",\n \"seg_plot\",\n \"plot_subpopulations\",\n \"plot_history\",\n \"make_confusion_matrix\",\n \"make_roc_curves\",\n \"plot_embedding\",\n \"format_plot_names\",\n \"plot_results\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport os\nimport random\nfrom pathlib import Path\nfrom typing import Any, Dict, List, Optional, Sequence, Tuple, Union\n\nimport imageio\nimport matplotlib as mlp\nimport matplotlib.pyplot as plt\nimport numpy as np\nfrom alive_progress import alive_bar\nfrom matplotlib import offsetbox\nfrom matplotlib.colors import Colormap, ListedColormap\nfrom matplotlib.gridspec import GridSpec\nfrom matplotlib.image import AxesImage\nfrom matplotlib.ticker import MaxNLocator\nfrom matplotlib.transforms import Bbox\nfrom nptyping import Float, Int, NDArray, Shape\nfrom numpy.typing import ArrayLike\nfrom rasterio.crs import CRS\nfrom scipy import stats\nfrom sklearn.metrics import ConfusionMatrixDisplay\nfrom torchgeo.datasets.utils import BoundingBox\n\nfrom minerva.utils import AUX_CONFIGS, CONFIG, universal_path, utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\nDATA_CONFIG = AUX_CONFIGS.get(\"data_config\")\nIMAGERY_CONFIG = AUX_CONFIGS[\"imagery_config\"]\n\n# Path to directory holding dataset.\nDATA_DIR = CONFIG[\"dir\"][\"data\"]\n\n# Band IDs and position in sample image.\nBAND_IDS = IMAGERY_CONFIG[\"data_specs\"][\"band_ids\"]\n\n# Maximum pixel value (e.g. 
255 for 8-bit integer).\nMAX_PIXEL_VALUE = IMAGERY_CONFIG[\"data_specs\"][\"max_value\"]\n\nWGS84 = CRS.from_epsg(4326)\n\n# Automatically fixes the layout of the figures to accommodate the colour bar legends.\nplt.rcParams[\"figure.constrained_layout.use\"] = True\n\n# Increases DPI to avoid strange plotting errors for class heatmaps.\nplt.rcParams[\"figure.dpi\"] = 300\nplt.rcParams[\"savefig.dpi\"] = 300\n\n# Removes margin in x-axis of plots.\nplt.rcParams[\"axes.xmargin\"] = 0\n\n# Filters out all TensorFlow messages other than errors.\nos.environ[\"TF_CPP_MIN_LOG_LEVEL\"] = \"3\"\n\n_MAX_SAMPLES = 25\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef de_interlace(x: Sequence[Any], f: int) -> NDArray[Any, Any]:\n \"\"\"Separates interlaced arrays, ``x`` at a frequency of ``f`` from each other.\n\n Args:\n x (~typing.Sequence[~typing.Any]): Array of data to be de-interlaced.\n f (int): Frequency at which interlacing occurs. Equivalent to number of sources interlaced together.\n\n Returns:\n ~numpy.ndarray[~typing.Any]: De-interlaced array. Each source array is now sequentially connected.\n \"\"\"\n new_x: List[NDArray[Any, Any]] = []\n for i in range(f):\n x_i = []\n for j in np.arange(start=i, stop=len(x), step=f):\n x_i.append(x[j])\n new_x.append(np.array(x_i).flatten())\n\n return np.array(new_x).flatten()\n\n\ndef dec_extent_to_deg(\n shape: Tuple[int, int],\n bounds: BoundingBox,\n src_crs: CRS,\n new_crs: CRS = WGS84,\n spacing: int = 32,\n) -> Tuple[Tuple[int, int, int, int], NDArray[Any, Float], NDArray[Any, Float]]:\n \"\"\"Gets the extent of the image with ``shape`` and with ``bounds`` in latitude, longitude of system ``new_crs``.\n\n Args:\n shape (tuple[int, int]): 2D shape of image to be used to define the extents of the composite image.\n bounds (~torchgeo.datasets.utils.BoundingBox): Object describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n src_crs (~rasterio.crs.CRS): Source co-ordinate reference system (CRS).\n new_crs (~rasterio.crs.CRS): Optional; The co-ordinate reference system (CRS) to transform to.\n spacing (int): Spacing of the lat - lon ticks.\n\n Returns:\n tuple[tuple[int, int, int, int], ~numpy.ndarray[float], ~numpy.ndarray[float]]:\n * The corners of the image in pixel co-ordinates e.g. 
``(0, 256, 0, 256)``.\n * The latitude extent of the image with ticks at intervals defined by ``spacing``.\n * The longitude extent of the image with ticks at intervals defined by ``spacing``.\n \"\"\"\n # Defines the 'extent' for a composite image based on the size of shape.\n extent = 0, shape[0], 0, shape[1]\n\n # Gets the co-ordinates of the corners of the image in decimal lat-lon.\n corners = utils.transform_coordinates(\n x=[bounds.minx, bounds.maxx],\n y=[bounds.miny, bounds.maxy],\n src_crs=src_crs,\n new_crs=new_crs,\n )\n\n # Creates a discrete mapping of the spaced ticks to latitude longitude extent of the image.\n lat_extent = np.around(\n np.linspace(\n start=corners[1][0],\n stop=corners[1][1],\n num=int(shape[0] / spacing) + 1,\n endpoint=True,\n ),\n decimals=3,\n )\n lon_extent = np.around(\n np.linspace(\n start=corners[0][0],\n stop=corners[0][1],\n num=int(shape[0] / spacing) + 1,\n endpoint=True,\n ),\n decimals=3,\n )\n\n return extent, lat_extent, lon_extent\n\n\ndef get_mlp_cmap(\n cmap_style: Optional[Union[Colormap, str]] = None, n_classes: Optional[int] = None\n) -> Optional[Colormap]:\n \"\"\"Creates a cmap from query\n\n Args:\n cmap_style (~matplotlib.colors.Colormap | str): Optional; :mod:`matplotlib` colourmap style to get.\n n_classes (int): Optional; Number of classes in data to assign colours to.\n\n Returns:\n ~matplotlib.colors.Colormap | None:\n * If ``cmap_style`` and ``n_classes`` provided, returns a :class:`~matplotlib.colors.ListedColormap` instance.\n * If ``cmap_style`` provided but no ``n_classes``, returns a :class:`~matplotlib.colors.Colormap` instance.\n * If neither arguments are provided, ``None`` is returned.\n \"\"\"\n cmap: Optional[Colormap] = None\n\n if cmap_style:\n if isinstance(cmap_style, str):\n cmap = mlp.colormaps[cmap_style] # type: ignore\n else:\n cmap = cmap_style\n\n if n_classes:\n assert isinstance(cmap, Colormap)\n cmap = cmap.resampled(n_classes) # type: ignore\n\n return cmap\n\n\ndef discrete_heatmap(\n data: NDArray[Shape[\"*, *\"], Int], # noqa: F722\n classes: Union[List[str], Tuple[str, ...]],\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n block_size: int = 32,\n) -> None:\n \"\"\"Plots a heatmap with a discrete colour bar. Designed for Radiant Earth MLHub 256x256 SENTINEL images.\n\n Args:\n data (~numpy.ndarray[int]): 2D Array of data to be plotted as a heat map.\n classes (list[str]): Optional; List of all possible class labels.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n block_size (int): Optional; Size of block image subdivision in pixels.\n \"\"\"\n # Initialises a figure.\n plt.figure()\n\n # Creates a cmap from query.\n cmap = get_mlp_cmap(cmap_style, len(classes))\n\n # Plots heatmap onto figure.\n heatmap = plt.imshow(data, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5) # type: ignore[arg-type]\n\n # Sets tick intervals to block size. 
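Given the definition of `get_mlp_cmap` above, a small hedged example of the two ways the helper can resolve (the `"viridis"` style and class count are arbitrary choices for illustration):

```python
from matplotlib.colors import Colormap

# A named matplotlib style resampled to one colour per class (5 here).
cmap = get_mlp_cmap("viridis", n_classes=5)
assert isinstance(cmap, Colormap)

# With no arguments the helper simply returns None.
assert get_mlp_cmap() is None
```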
Default 32 x 32.\n plt.xticks(np.arange(0, data.shape[0] + 1, block_size))\n plt.yticks(np.arange(0, data.shape[1] + 1, block_size))\n\n # Add grid overlay.\n plt.grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n # Plots colour bar onto figure.\n clb = plt.colorbar(heatmap, ticks=np.arange(0, len(classes)), shrink=0.77)\n\n # Sets colour bar ticks to class labels.\n clb.ax.set_yticklabels(classes)\n\n # Display figure.\n plt.show(block=False)\n\n # Close figure.\n plt.close()\n\n\ndef stack_rgb(\n image: NDArray[Shape[\"3, *, *\"], Float], # noqa: F722\n rgb: Dict[str, int] = BAND_IDS,\n max_value: int = MAX_PIXEL_VALUE,\n) -> NDArray[Shape[\"*, *, 3\"], Float]: # noqa: F722\n \"\"\"Stacks together red, green and blue image bands to create a RGB array.\n\n Args:\n image (~numpy.ndarray[float]): Image of separate channels to be normalised\n and reshaped into stacked RGB image.\n rgb (dict[str, int]): Optional; Dictionary of which channels in image are the R, G & B bands.\n max_value (int): Optional; The maximum pixel value in ``image``. e.g. for 8 bit this will be 255.\n\n Returns:\n ~numpy.ndarray[float]: Normalised and stacked red, green, blue arrays into RGB array.\n \"\"\"\n\n # Extract R, G, B bands from image and normalise.\n channels: List[Any] = []\n for channel in [\"R\", \"G\", \"B\"]:\n band = image[rgb[channel]] / max_value\n channels.append(band)\n\n # Stack together RGB bands.\n # Note that it has to be order BGR not RGB due to the order numpy stacks arrays.\n rgb_image: NDArray[Shape[\"3, *, *\"], Any] = np.dstack( # noqa: F722\n (channels[2], channels[1], channels[0])\n )\n assert isinstance(rgb_image, np.ndarray)\n return rgb_image\n\n\ndef make_rgb_image(\n image: NDArray[Shape[\"3, *, *\"], Float], # noqa: F722\n rgb: Dict[str, int],\n block_size: int = 32,\n) -> AxesImage:\n \"\"\"Creates an RGB image from a composition of red, green and blue bands.\n\n Args:\n image (~numpy.ndarray[int]): Array representing the image of shape ``(bands x height x width)``.\n rgb (dict[str, int]): Dictionary of channel numbers of R, G & B bands within ``image``.\n block_size (int): Optional; Size of block image sub-division in pixels.\n\n Returns:\n ~matplotlib.image.AxesImage: Plotted RGB image object.\n \"\"\"\n # Stack RGB image data together.\n rgb_image_array = stack_rgb(image, rgb)\n\n # Create RGB image.\n rgb_image = plt.imshow(rgb_image_array)\n\n # Sets tick intervals to block size. Default 32 x 32.\n plt.xticks(np.arange(0, rgb_image_array.shape[0] + 1, block_size))\n plt.yticks(np.arange(0, rgb_image_array.shape[1] + 1, block_size))\n\n # Add grid overlay.\n plt.grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n plt.show(block=False)\n\n return rgb_image\n\n\ndef labelled_rgb_image(\n image: NDArray[Shape[\"*, *, 3\"], Float], # noqa: F722\n mask: NDArray[Shape[\"*, *\"], Int], # noqa: F722\n bounds: BoundingBox,\n src_crs: CRS,\n path: Union[str, Path],\n name: str,\n classes: Union[List[str], Tuple[str, ...]],\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n new_crs: Optional[CRS] = WGS84,\n block_size: int = 32,\n alpha: float = 0.5,\n show: bool = True,\n save: bool = True,\n figdim: Tuple[Union[int, float], Union[int, float]] = (8.02, 10.32),\n) -> Path:\n \"\"\"Produces a layered image of an RGB image, and it's associated label mask heat map alpha blended on top.\n\n Args:\n image (~numpy.ndarray[int]): Array representing the image of shape ``(height x width x bands)``.\n mask (~numpy.ndarray[int]): Ground truth mask. 
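A quick sketch of `stack_rgb` on synthetic data; the band mapping below is hypothetical, whereas real runs take `BAND_IDS` and `MAX_PIXEL_VALUE` from the imagery config:

```python
import numpy as np

# Three synthetic 256x256 bands in channel-first order, valued 0..255.
image = np.random.randint(0, 256, size=(3, 256, 256)).astype(float)

# Hypothetical band mapping: R, G, B in channels 0, 1, 2.
rgb = stack_rgb(image, rgb={"R": 0, "G": 1, "B": 2}, max_value=255)
assert rgb.shape == (256, 256, 3) and rgb.max() <= 1.0
```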
Should be of shape (height x width) matching ``image``.\n bounds (~torchgeo.datasets.utils.BoundingBox): Object describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n src_crs (~rasterio.crs.CRS): Source co-ordinate reference system (CRS).\n path (str): Path to where to save created figure.\n name (str): Name of figure. Will be used for title and in the filename.\n classes (list[str]): Optional; List of all possible class labels.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n new_crs (~rasterio.crs.CRS): Optional; The co-ordinate reference system (CRS) to transform to.\n block_size (int): Optional; Size of block image subdivision in pixels.\n alpha (float): Optional; Fraction determining alpha blending of label mask.\n show (bool): Optional; Show the figure when plotted.\n save (bool): Optional; Save the figure to ``path``.\n figdim (tuple[int | float, int | float]): Optional; Figure (height, width) in inches.\n\n Returns:\n str: Path to figure save location.\n \"\"\"\n # Checks that the mask and image shapes will align.\n mask_shape: Tuple[int, int] = mask.shape # type: ignore[assignment]\n assert mask_shape == image.shape[:2]\n\n assert new_crs is not None\n\n # Gets the extent of the image in pixel, lattitude and longitude dimensions.\n extent, lat_extent, lon_extent = dec_extent_to_deg(\n mask_shape,\n bounds=bounds,\n src_crs=src_crs,\n spacing=block_size,\n new_crs=new_crs,\n )\n\n # Initialises a figure.\n fig, ax1 = plt.subplots()\n\n # Create RGB image.\n ax1.imshow(image, extent=extent)\n\n # Creates a cmap from query.\n cmap = get_mlp_cmap(cmap_style, len(classes))\n\n # Plots heatmap onto figure.\n heatmap = ax1.imshow(\n mask, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5, extent=extent, alpha=alpha # type: ignore[arg-type]\n )\n\n # Sets tick intervals to standard 32x32 block size.\n ax1.set_xticks(np.arange(0, mask.shape[0] + 1, block_size))\n ax1.set_yticks(np.arange(0, mask.shape[1] + 1, block_size))\n\n # Creates a secondary x and y-axis to hold lat-lon.\n ax2 = ax1.twiny().twinx()\n\n # Plots an invisible line across the diagonal of the image to create the secondary axis for lat-lon.\n ax2.plot(\n lon_extent,\n lat_extent,\n \" \",\n clip_box=Bbox.from_extents(\n lon_extent[0], lat_extent[0], lon_extent[-1], lat_extent[-1]\n ),\n )\n\n # Set ticks for lat-lon.\n ax2.set_xticks(lon_extent)\n ax2.set_yticks(lat_extent)\n\n # Sets the limits of the secondary axis, so they should align with the primary.\n ax2.set_xlim(left=lon_extent[0], right=lon_extent[-1])\n ax2.set_ylim(top=lat_extent[-1], bottom=lat_extent[0])\n\n # Converts the decimal lat-lon into degrees, minutes, seconds to label the axis.\n lat_labels = utils.dec2deg(lat_extent, axis=\"lat\")\n lon_labels = utils.dec2deg(lon_extent, axis=\"lon\")\n\n # Sets the secondary axis tick labels.\n ax2.set_xticklabels(lon_labels, fontsize=11)\n ax2.set_yticklabels(lat_labels, fontsize=10, rotation=-30, ha=\"left\")\n\n # Add grid overlay.\n ax1.grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n # Plots colour bar onto figure.\n clb = plt.colorbar(\n heatmap, ticks=np.arange(0, len(classes)), shrink=0.9, aspect=75, drawedges=True\n )\n\n # Sets colour bar ticks to class labels.\n clb.ax.set_yticklabels(classes, fontsize=11)\n\n # Bodge to get a figure title by using the colour bar title.\n clb.ax.set_title(f\"{name}\\nLand Cover\", loc=\"left\", fontsize=15)\n\n # Set axis labels.\n ax1.set_xlabel(\"(x) - 
Pixel Position\", fontsize=14)\n ax1.set_ylabel(\"(y) - Pixel Position\", fontsize=14)\n ax2.set_ylabel(\"Latitude\", fontsize=14, rotation=270, labelpad=12)\n ax2.set_title(\"Longitude\") # Bodge\n\n # Manual trial and error fig size which fixes aspect ratio issue.\n fig.set_figheight(figdim[0])\n fig.set_figwidth(figdim[1])\n\n # Display figure.\n if show:\n plt.show(block=False)\n\n # Path and file name of figure.\n fn = Path(f\"{path}/{name}_RGBHM.png\")\n\n # If true, save file to fn.\n if save:\n # Checks if file already exists. Deletes if true.\n utils.exist_delete_check(fn)\n\n # Save figure to fn.\n fig.savefig(fn)\n\n # Close figure.\n plt.close()\n\n return fn\n\n\ndef make_gif(\n dates: Sequence[str],\n images: NDArray[Shape[\"*, *, *, 3\"], Any], # noqa: F722\n masks: NDArray[Shape[\"*, *, *\"], Any], # noqa: F722\n bounds: BoundingBox,\n src_crs: CRS,\n classes: Union[List[str], Tuple[str, ...]],\n gif_name: str,\n path: Union[str, Path],\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n fps: float = 1.0,\n new_crs: Optional[CRS] = WGS84,\n alpha: float = 0.5,\n figdim: Tuple[Union[int, float], Union[int, float]] = (8.02, 10.32),\n) -> None:\n \"\"\"Wrapper to :func:`labelled_rgb_image` to make a GIF for a patch out of scenes.\n\n Args:\n dates (~typing.Sequence[str]): Dates of scenes to be used as the frames in the GIF.\n images (~numpy.ndarray[~typing.Any]): All the frames of imagery to make the GIF from.\n Leading dimension must be the same length as ``dates`` and ``masks``.\n masks (~numpy.ndarray[~typing.Any]): The masks for each frame of the GIF.\n Leading dimension must be the same length as ``dates`` and ``image``.\n bounds (~torchgeo.datasets.utils.BoundingBox): The bounding box (in the ``src_crs`` CRS) of the\n :term:`patch` the ``GIF`` will be of.\n src_crs (~rasterio.crs.CRS): Source co-ordinate reference system (CRS).\n classes (list[str]): List of all possible class labels.\n gif_name (str): Path to and name of GIF to be made.\n path (~pathlib.Path | str]): Path to where to save frames of the ``GIF``.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n fps (float): Optional; Frames per second of ``GIF``.\n new_crs (~rasterio.crs.CRS): Optional; The co-ordinate reference system (CRS) to transform to.\n alpha (float): Optional; Fraction determining alpha blending of label mask.\n figdim (tuple[int | float, int | float]): Optional; Figure (height, width) in inches.\n\n Returns:\n None\n \"\"\"\n # Changes to `imagio` now mean we need the duration of the GIF and not the `fps`.\n duration = len(dates) / fps\n\n # Initialise progress bar.\n with alive_bar(len(dates), bar=\"blocks\") as bar:\n # List to hold filenames and paths of images created.\n frames = []\n for i in range(len(dates)):\n # Update progress bar with current scene.\n bar.text(\"SCENE ON %s\" % dates[i])\n\n # Create a frame of the GIF for a scene of the patch.\n frame = labelled_rgb_image(\n images[i],\n masks[i],\n bounds,\n src_crs,\n path,\n name=f\"{i}\",\n classes=classes,\n cmap_style=cmap_style,\n new_crs=new_crs,\n alpha=alpha,\n save=True,\n show=False,\n figdim=figdim,\n )\n\n # Read in frame just created and add to list of frames.\n frames.append(imageio.imread(frame))\n\n # Update bar with step completion.\n bar()\n\n # Checks GIF doesn't already exist. 
Deletes if it does.\n utils.exist_delete_check(gif_name)\n\n # Create a 'unknown' bar to 'spin' while the GIF is created.\n with alive_bar(unknown=\"waves\") as bar:\n # Add current operation to spinner bar.\n bar.text(\"MAKING PATCH GIF\")\n\n # Create GIF.\n imageio.mimwrite(gif_name, frames, format=\".gif\", duration=duration) # type: ignore\n\n\ndef prediction_plot(\n sample: Dict[str, Any],\n sample_id: str,\n classes: Dict[int, str],\n src_crs: CRS,\n new_crs: CRS = WGS84,\n cmap_style: Optional[Union[str, ListedColormap]] = None,\n exp_id: Optional[str] = None,\n fig_dim: Optional[Tuple[Union[int, float], Union[int, float]]] = None,\n block_size: int = 32,\n show: bool = True,\n save: bool = True,\n fn_prefix: Optional[Union[str, Path]] = None,\n) -> None:\n \"\"\"\n Produces a figure containing subplots of the predicted label mask, the ground truth label mask\n and a reference RGB image of the same patch.\n\n Args:\n sample (dict[str, ~typing.Any]): Dictionary holding the ``\"image\"``, ground truth (``\"mask\"``)\n and predicted (``\"pred\"``) masks and the bounding box for this sample.\n sample_id (str): ID for the sample.\n classes (dict[int, str]): Dictionary mapping class labels to class names.\n src_crs (~rasterio.crs.CRS): Existing co-ordinate system of the image.\n new_crs(~rasterio.crs.CRS): Optional; Co-ordinate system to convert image to and use for labelling.\n exp_id (str): Optional; Unique ID for the experiment run that predictions and labels come from.\n block_size (int): Optional; Size of block image sub-division in pixels.\n cmap_style (str | ~matplotlib.colors.ListedColormap): Optional; Name or object for colour map style.\n show (bool): Optional; Show the figure when plotted.\n save (bool): Optional; Save the figure to file to ``fn_prefix``.\n fig_dim (tuple[float, float]): Optional; Figure (height, width) in inches.\n fn_prefix (str | ~pathlib.Path): Optional; Common filename prefix (including path to file) for all plots of\n this type from this experiment. 
Appended with the sample ID to give the filename to save the plot to.\n\n Returns:\n None\n \"\"\"\n # Stacks together the R, G, & B bands to form an array of the RGB image.\n rgb_image = sample[\"image\"]\n z = sample[\"pred\"]\n y = sample[\"mask\"]\n bounds = sample[\"bounds\"]\n\n extent, lat_extent, lon_extent = dec_extent_to_deg(\n y.shape, bounds, src_crs, new_crs=new_crs, spacing=block_size\n )\n\n centre = utils.transform_coordinates(\n *utils.get_centre_loc(bounds), src_crs=src_crs, new_crs=new_crs\n )\n\n # Initialises a figure.\n fig = plt.figure(figsize=fig_dim)\n\n gs = GridSpec(nrows=2, ncols=2, figure=fig)\n\n axes: NDArray[Shape[\"3\"], Any] = np.array(\n [\n fig.add_subplot(gs[0, 0]),\n fig.add_subplot(gs[0, 1]),\n fig.add_subplot(gs[1, :]),\n ]\n )\n\n cmap = get_mlp_cmap(cmap_style, len(classes))\n\n # Plots heatmap onto figure.\n z_heatmap = axes[0].imshow(z, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5)\n _ = axes[1].imshow(y, cmap=cmap, vmin=-0.5, vmax=len(classes) - 0.5)\n\n # Create RGB image.\n axes[2].imshow(rgb_image, extent=extent)\n\n # Sets tick intervals to standard 32x32 block size.\n axes[0].set_xticks(np.arange(0, z.shape[0] + 1, block_size))\n axes[0].set_yticks(np.arange(0, z.shape[1] + 1, block_size))\n\n axes[1].set_xticks(np.arange(0, y.shape[0] + 1, block_size))\n axes[1].set_yticks(np.arange(0, y.shape[1] + 1, block_size))\n\n axes[2].set_xticks(np.arange(0, rgb_image.shape[0] + 1, block_size))\n axes[2].set_yticks(np.arange(0, rgb_image.shape[1] + 1, block_size))\n\n # Add grid overlay.\n axes[0].grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n axes[1].grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n axes[2].grid(which=\"both\", color=\"#CCCCCC\", linestyle=\":\")\n\n # Converts the decimal lat-lon into degrees, minutes, seconds to label the axis.\n lat_labels = utils.dec2deg(lat_extent, axis=\"lat\")\n lon_labels = utils.dec2deg(lon_extent, axis=\"lon\")\n\n # Sets the secondary axis tick labels.\n axes[2].set_xticklabels(lon_labels, fontsize=9, rotation=30)\n axes[2].set_yticklabels(lat_labels, fontsize=9)\n\n # Plots colour bar onto figure.\n clb = fig.colorbar(\n z_heatmap,\n ax=axes.ravel().tolist(),\n location=\"top\",\n ticks=np.arange(0, len(classes)),\n aspect=75,\n drawedges=True,\n )\n\n # Sets colour bar ticks to class labels.\n clb.ax.set_xticklabels(classes.values(), fontsize=9)\n\n # Set figure title and subplot titles.\n fig.suptitle(\n f\"{sample_id}: {utils.lat_lon_to_loc(lat=str(centre[1]), lon=str(centre[0]))}\",\n fontsize=15,\n )\n axes[0].set_title(\"Predicted\", fontsize=13)\n axes[1].set_title(\"Ground Truth\", fontsize=13)\n axes[2].set_title(\"Reference Imagery\", fontsize=13)\n\n # Set axis labels.\n axes[0].set_xlabel(\"(x) - Pixel Position\", fontsize=10)\n axes[0].set_ylabel(\"(y) - Pixel Position\", fontsize=10)\n axes[1].set_xlabel(\"(x) - Pixel Position\", fontsize=10)\n axes[1].set_ylabel(\"(y) - Pixel Position\", fontsize=10)\n axes[2].set_xlabel(\"Longitude\", fontsize=10)\n axes[2].set_ylabel(\"Latitude\", fontsize=10)\n\n # Display figure.\n if show:\n plt.show(block=False)\n\n if fn_prefix is None:\n path = universal_path(CONFIG[\"dir\"][\"results\"])\n fn_prefix = str(path / f\"{exp_id}_{utils.timestamp_now()}_Mask\")\n\n # Path and file name of figure.\n fn = f\"{fn_prefix}_{sample_id}.png\"\n\n # If true, save file to fn.\n if save:\n # Checks if file already exists. 
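The `sample` dictionary that `prediction_plot` consumes can be assembled by hand; a sketch with hypothetical arrays standing in for real model outputs and dataset samples:

```python
sample = {
    "image": rgb_image,  # (H, W, 3) reference RGB image
    "pred": z_mask,      # (H, W) predicted label mask
    "mask": y_mask,      # (H, W) ground truth label mask
    "bounds": bounds,    # torchgeo BoundingBox for georeferencing
}
prediction_plot(
    sample,
    sample_id="patch_001",
    classes={0: "Water", 1: "Urban", 2: "Forest"},
    src_crs=crs,
    show=False,
    save=True,
)
```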
Deletes if true.\n utils.exist_delete_check(fn)\n\n # Save figure to fn.\n fig.savefig(fn)\n\n # Close figure.\n plt.close()\n\n\ndef seg_plot(\n z: Union[List[int], NDArray[Any, Any]],\n y: Union[List[int], NDArray[Any, Any]],\n ids: List[str],\n bounds: Union[Sequence[Any], NDArray[Any, Any]],\n mode: str,\n classes: Dict[int, str],\n colours: Dict[int, str],\n fn_prefix: Union[str, Path],\n frac: float = 0.05,\n fig_dim: Optional[Tuple[Union[int, float], Union[int, float]]] = (9.3, 10.5),\n) -> None:\n \"\"\"Custom function for pre-processing the outputs from image segmentation testing for data visualisation.\n\n Args:\n z (list[float]): Predicted segmentation masks by the network.\n y (list[float]): Corresponding ground truth masks.\n ids (list[str]): Corresponding patch IDs for the test data supplied to the network.\n bounds (list[~torchgeo.datasets.utils.BoundingBox] | ~numpy.ndarray[~torchgeo.datasets.utils.BoundingBox]):\n Array of objects describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n mode (str): Mode samples are from. Must be ``'train'``, ``'val'`` or ``'test'``.\n classes (dict[int, str]): Dictionary mapping class labels to class names.\n colours (dict[int, str]): Dictionary mapping class labels to colours.\n fn_prefix (str | ~pathlib.Path): Common filename prefix (including path to file) for all plots of this type\n from this experiment to use.\n frac (float): Optional; Fraction of patch samples to plot.\n fig_dim (tuple[float, float]): Optional; Figure (height, width) in inches.\n\n Returns:\n None\n \"\"\"\n # TODO: This is a very naughty way of avoiding a circular import.\n # Need to reorganise package to avoid need for this.\n from minerva.datasets import make_dataset\n\n if not isinstance(z, np.ndarray):\n z = np.array(z)\n\n if not isinstance(y, np.ndarray):\n y = np.array(y)\n\n z = np.reshape(z, (z.shape[0] * z.shape[1], z.shape[2], z.shape[3]))\n y = np.reshape(y, (y.shape[0] * y.shape[1], y.shape[2], y.shape[3]))\n flat_ids: NDArray[Any, Any] = np.array(ids).flatten()\n\n print(\"\\nRE-CONSTRUCTING DATASET\")\n dataset, _ = make_dataset(CONFIG[\"dir\"][\"data\"], CONFIG[\"dataset_params\"][mode])\n\n # Create a new projection system in lat-lon.\n crs = dataset.crs\n\n print(\"\\nPRODUCING PREDICTED MASKS\")\n\n # Limits number of masks to produce to a fractional number of total and no more than `_MAX_SAMPLES`.\n n_samples = int(frac * len(flat_ids))\n if n_samples > _MAX_SAMPLES:\n n_samples = _MAX_SAMPLES\n\n # Initialises a progress bar for the epoch.\n with alive_bar(n_samples, bar=\"blocks\") as bar:\n # Plots the predicted versus ground truth labels for all test patches supplied.\n for i in random.sample(range(len(flat_ids)), n_samples):\n image = stack_rgb(dataset[bounds[i]][\"image\"].numpy())\n sample = {\"image\": image, \"pred\": z[i], \"mask\": y[i], \"bounds\": bounds[i]}\n\n prediction_plot(\n sample,\n flat_ids[i],\n classes=classes,\n src_crs=crs,\n exp_id=CONFIG[\"model_name\"],\n show=False,\n fn_prefix=fn_prefix,\n fig_dim=fig_dim,\n cmap_style=ListedColormap(colours.values(), N=len(colours)), # type: ignore\n )\n\n bar()\n\n\ndef plot_subpopulations(\n class_dist: List[Tuple[int, int]],\n class_names: Dict[int, str],\n cmap_dict: Dict[int, str],\n filename: Optional[Union[str, Path]] = None,\n save: bool = True,\n show: bool = False,\n) -> None:\n \"\"\"Creates a pie chart of the distribution of the classes within the data.\n\n Args:\n class_dist (list[tuple[int, int]]): Modal 
distribution of classes in the dataset provided.\n class_names (dict[int, str]): Optional; Dictionary mapping class labels to class names.\n cmap_dict (dict[int, str]): Optional; Dictionary mapping class labels to class colours.\n filename (str): Optional; Name of file to save plot to.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n\n Returns:\n None\n \"\"\"\n # List to hold the name and percentage distribution of each class in the data as str.\n class_data = []\n\n # List to hold the total counts of each class.\n counts = []\n\n # List to hold colours of classes in the correct order.\n colours = []\n\n # Finds total number of samples to normalise data.\n n_samples = 0\n for mode in class_dist:\n n_samples += mode[1]\n\n # For each class, find the percentage of data that is that class and the total counts for that class.\n for label in class_dist:\n # Sets percentage label to <0.01% for classes matching that equality.\n if (label[1] * 100.0 / n_samples) > 0.01:\n class_data.append(\n \"{} \\n{:.2f}%\".format(\n class_names[label[0]], (label[1] * 100.0 / n_samples)\n )\n )\n else:\n class_data.append(\"{} \\n<0.01%\".format(class_names[label[0]]))\n counts.append(label[1])\n colours.append(cmap_dict[label[0]])\n\n # Locks figure size.\n plt.figure(figsize=(6, 5))\n\n # Plot a pie chart of the data distribution amongst the classes.\n patches, _ = plt.pie(\n counts, colors=colours, explode=[i * 0.05 for i in range(len(class_data))]\n )\n\n # Adds legend.\n plt.legend(\n patches, class_data, loc=\"center left\", bbox_to_anchor=(1, 0.5), frameon=False\n )\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n plt.close()\n\n\ndef plot_history(\n metrics: Dict[str, Any],\n filename: Optional[Union[str, Path]] = None,\n save: bool = True,\n show: bool = False,\n) -> None:\n \"\"\"Plots model history based on metrics supplied.\n\n Args:\n metrics (dict[str, ~typing.Any]): Dictionary containing the names and results of the metrics\n by which model was assessed.\n filename (str): Optional; Name of file to save plot to.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n\n Returns:\n None\n \"\"\"\n # Initialise figure.\n ax = plt.figure().gca()\n\n # Plots each metric in metrics, appending their artist handles.\n handles = []\n labels = []\n for key in metrics:\n # Checks that the length of x matches y and is greater than 1 so can be plotted.\n if len(metrics[key][\"x\"]) == len(metrics[key][\"y\"]) >= 1.0:\n # Plot metric.\n handles.append(ax.plot(metrics[key][\"x\"], metrics[key][\"y\"])[0])\n labels.append(key)\n\n # Creates legend from plot artist handles and names of metrics.\n ax.legend(handles=handles, labels=labels)\n\n # Forces x-axis ticks to be integers.\n ax.xaxis.set_major_locator(MaxNLocator(integer=True))\n\n # Adds a grid overlay with green dashed lines.\n ax.grid(color=\"green\", linestyle=\"--\", linewidth=0.5) # For some funky gridlines\n\n # Adds axis labels.\n ax.set_xlabel(\"Epoch\")\n ax.set_ylabel(\"Loss/Accuracy\")\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n plt.close()\n\n\ndef make_confusion_matrix(\n pred: Union[List[int], NDArray[Any, Int]],\n labels: Union[List[int], NDArray[Any, Int]],\n classes: Dict[int, str],\n filename: Optional[Union[str, Path]] = None,\n cmap_style: str = \"Blues\",\n show: bool = True,\n save: bool = False,\n) -> None:\n 
\"\"\"Creates a heat-map of the confusion matrix of the given model.\n\n Args:\n pred(list[int]): Predictions made by model on test images.\n labels (list[int]): Accompanying ground truth labels for testing images.\n classes (dict[int, str]): Dictionary mapping class labels to class names.\n filename (str): Optional; Name of file to save plot to.\n cmap_style (str): Colourmap style to use in the confusion matrix.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n\n Returns:\n None\n \"\"\"\n _pred, _labels, new_classes = utils.check_test_empty(pred, labels, classes)\n\n # Extract class names from dict in numeric order to ensure labels match matrix.\n class_names = [new_classes[key] for key in range(len(new_classes.keys()))]\n\n if DATA_CONFIG is not None:\n figsize = DATA_CONFIG[\"fig_sizes\"][\"CM\"]\n else: # pragma: no cover\n figsize = None\n\n # Creates the figure to plot onto.\n ax = plt.figure(figsize=figsize).gca()\n\n # Get a matplotlib colourmap based on the style specified to use for the confusion matrix.\n cmap = get_mlp_cmap(cmap_style)\n\n # Creates, plots and normalises the confusion matrix.\n cm = ConfusionMatrixDisplay.from_predictions(\n _labels,\n _pred,\n labels=list(new_classes.keys()),\n normalize=\"all\",\n display_labels=class_names,\n cmap=cmap,\n ax=ax,\n )\n\n # Normalises the colourbar to between [0, 1] for consistent clarity.\n cm.ax_.get_images()[0].set_clim(0, 1)\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n plt.close()\n\n\ndef make_roc_curves(\n probs: ArrayLike,\n labels: Union[Sequence[int], NDArray[Any, Int]],\n class_names: Dict[int, str],\n colours: Dict[int, str],\n micro: bool = True,\n macro: bool = True,\n filename: Optional[Union[str, Path]] = None,\n show: bool = False,\n save: bool = True,\n) -> None:\n \"\"\"Plots ROC curves for each class, the micro and macro average ROC curves and accompanying AUCs.\n\n Adapted from Scikit-learn's example at:\n https://scikit-learn.org/stable/auto_examples/model_selection/plot_roc.html\n\n Args:\n probs (list | ~numpy.ndarray[int]): Array of probabilistic predicted classes from model where each sample\n should have a list of the predicted probability for each class.\n labels (list | ~numpy.ndarray[int]): List of corresponding ground truth labels.\n class_names (dict[int, str]): Dictionary mapping class labels to class names.\n colours (dict[int, str]): Dictionary mapping class labels to colours.\n micro (bool): Optional; Whether to compute and plot the micro average ROC curves.\n macro (bool): Optional; Whether to compute and plot the macro average ROC curves.\n filename (str | ~pathlib.Path): Optional; Name of file to save plot to.\n save (bool): Optional; Whether to save the plots to file.\n show (bool): Optional; Whether to show the plots.\n\n Returns:\n None\n \"\"\"\n # Gets the class labels as a list from the class_names dict.\n class_labels = [key for key in class_names.keys()]\n\n # Reshapes the probabilities to be (n_samples, n_classes).\n probs = np.reshape(probs, (len(labels), len(class_labels)))\n\n # Computes all class, micro and macro average ROC curves and AUCs.\n fpr, tpr, roc_auc = utils.compute_roc_curves(\n probs, labels, class_labels, micro=micro, macro=macro\n )\n\n # Plot all ROC curves\n print(\"\\nPlotting ROC Curves\")\n plt.figure()\n\n if micro:\n # Plot micro average ROC curves.\n plt.plot(\n fpr[\"micro\"],\n tpr[\"micro\"],\n label=\"Micro-average (AUC = 
{:.2f})\".format(roc_auc[\"micro\"]),\n color=\"deeppink\",\n linestyle=\"dotted\",\n )\n\n if macro:\n # Plot macro average ROC curves.\n plt.plot(\n fpr[\"macro\"],\n tpr[\"macro\"],\n label=\"Macro-average (AUC = {:.2f})\".format(roc_auc[\"macro\"]),\n color=\"navy\",\n linestyle=\"dotted\",\n )\n\n # Plot all class ROC curves.\n for key in class_labels:\n try:\n plt.plot(\n fpr[key],\n tpr[key],\n color=colours[key],\n label=f\"{class_names[key]} \" + \"(AUC = {:.2f})\".format(roc_auc[key]),\n )\n except KeyError:\n pass\n\n # Plot random classifier diagonal.\n plt.plot([0, 1], [0, 1], \"k--\")\n\n # Set limits.\n plt.xlim([0.0, 1.0])\n plt.ylim([0.0, 1.05])\n\n # Set axis labels.\n plt.xlabel(\"False Positive Rate\")\n plt.ylabel(\"True Positive Rate\")\n\n # Position legend in lower right corner of figure where no classifiers should exist.\n plt.legend(loc=\"lower right\")\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n plt.savefig(filename)\n print(\"ROC Curves plot SAVED\")\n plt.close()\n\n\ndef plot_embedding(\n embeddings: Any,\n bounds: Union[Sequence[BoundingBox], NDArray[Any, Any]],\n mode: str,\n title: Optional[str] = None,\n show: bool = False,\n save: bool = True,\n filename: Optional[Union[Path, str]] = None,\n) -> None:\n \"\"\"Using TSNE Clustering, visualises the embeddings from a model.\n\n Args:\n embeddings (~typing.Any): Embeddings from a model.\n bounds (~typing.Sequence[~torchgeo.datasets.utils.BoundingBox] | ~numpy.ndarray[~torchgeo.datasets.utils.BoundingBox]): # noqa: E501\n Array of objects describing a geospatial bounding box.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n mode (str): Mode samples are from. Must be ``'train'``, ``'val'`` or ``'test'``.\n title (str): Optional; Title of plot.\n show (bool): Optional; Whether to show plot.\n save (bool): Optional; Whether to save plot to file.\n filename (str): Optional; Name of file to save plot to.\n\n Returns:\n None\n \"\"\"\n\n x = utils.tsne_cluster(embeddings)\n\n # TODO: This is a very naughty way of avoiding a circular import.\n # Need to reorganise package to avoid need for this.\n from minerva.datasets import make_dataset\n\n print(\"\\nRE-CONSTRUCTING DATASET\")\n dataset, _ = make_dataset(CONFIG[\"dir\"][\"data\"], CONFIG[\"dataset_params\"][mode])\n\n images = []\n targets = []\n\n # Initialises a progress bar for the epoch.\n with alive_bar(len(x), bar=\"blocks\") as bar:\n # Plots the predicted versus ground truth labels for all test patches supplied.\n for i in range(len(x)):\n sample = dataset[bounds[i]]\n images.append(stack_rgb(sample[\"image\"].numpy()))\n targets.append(\n [\n int(stats.mode(mask.flatten(), keepdims=False).mode)\n for mask in sample[\"mask\"].numpy()\n ]\n )\n\n bar()\n\n x_min, x_max = np.min(x, 0), np.max(x, 0)\n x = (x - x_min) / (x_max - x_min)\n\n plt.figure(figsize=(10, 10))\n ax = plt.subplot(111)\n\n for i in range(len(x)):\n plt.text(\n x[i, 0],\n x[i, 1],\n str(targets[i]),\n color=plt.cm.Set1(targets[i][0] / 10.0), # type: ignore\n fontdict={\"weight\": \"bold\", \"size\": 9},\n )\n\n if hasattr(offsetbox, \"AnnotationBbox\"):\n # only print thumbnails with matplotlib > 1.0\n shown_images: NDArray[Any, Any] = np.array([[1.0, 1.0]]) # just something big\n\n for i in range(len(images)):\n dist = np.sum((x[i] - shown_images) ** 2, 1)\n if np.min(dist) < 4e-3:\n # don’t show points that are too close\n continue # pragma: no cover\n\n shown_images = np.r_[shown_images, [x[i]]]\n imagebox = 
offsetbox.AnnotationBbox(\n offsetbox.OffsetImage(images[i], cmap=plt.cm.gray_r), x[i] # type: ignore\n )\n\n ax.add_artist(imagebox)\n\n plt.xticks([]), plt.yticks([]) # type: ignore\n\n if title is not None:\n plt.title(title)\n\n # Shows and/or saves plot.\n if show:\n plt.show(block=False)\n if save:\n if filename is None: # pragma: no cover\n filename = \"tsne_cluster_vis.png\"\n os.makedirs(Path(filename).parent, exist_ok=True)\n plt.savefig(filename)\n print(\"TSNE cluster visualisation SAVED\")\n plt.close()\n\n\ndef format_plot_names(\n model_name: str, timestamp: str, path: Union[Sequence[str], str, Path]\n) -> Dict[str, Path]:\n \"\"\"Creates unique filenames of plots in a standardised format.\n\n Args:\n model_name (str): Name of model. e.g. ``\"MLP-MkVI\"``.\n timestamp (str): Time and date to be used to identify experiment.\n path (list[str] | str | ~pathlib.Path]): Path to the directory for storing plots as a :class:`list`\n of strings for each level.\n\n Returns:\n filenames (dict[str, ~pathlib.Path]): Formatted filenames for plots.\n \"\"\"\n\n def standard_format(plot_type: str, *sub_dir) -> str:\n \"\"\"Creates a unique filename for a plot in a standardised format.\n\n Args:\n plot_type (str): Plot type to use in filename.\n sub_dir (str): Additional subdirectories to add to path to filename.\n\n Returns:\n str: String of path to filename of the form ``\"{model_name}_{timestamp}_{plot_type}.{file_ext}\"``\n \"\"\"\n filename = f\"{model_name}_{timestamp}_{plot_type}\"\n return str(universal_path(path) / universal_path(sub_dir) / filename)\n\n filenames = {\n \"History\": Path(standard_format(\"MH\") + \".png\"),\n \"Pred\": Path(standard_format(\"TP\") + \".png\"),\n \"CM\": Path(standard_format(\"CM\") + \".png\"),\n \"ROC\": Path(standard_format(\"ROC\" + \".png\")),\n \"Mask\": Path(standard_format(\"Mask\", \"Masks\")),\n \"PvT\": Path(standard_format(\"PvT\", \"PvTs\")),\n \"TSNE\": Path(standard_format(\"TSNE\") + \".png\"),\n }\n\n return filenames\n\n\ndef plot_results(\n plots: Dict[str, bool],\n z: Optional[Union[List[int], NDArray[Any, Int]]] = None,\n y: Optional[Union[List[int], NDArray[Any, Int]]] = None,\n metrics: Optional[Dict[str, Any]] = None,\n ids: Optional[List[str]] = None,\n mode: str = \"test\",\n bounds: Optional[NDArray[Any, Any]] = None,\n probs: Optional[Union[List[float], NDArray[Any, Float]]] = None,\n embeddings: Optional[NDArray[Any, Any]] = None,\n class_names: Optional[Dict[int, str]] = None,\n colours: Optional[Dict[int, str]] = None,\n save: bool = True,\n show: bool = False,\n model_name: Optional[str] = None,\n timestamp: Optional[str] = None,\n results_dir: Optional[Union[Sequence[str], str, Path]] = None,\n) -> None:\n \"\"\"Orchestrates the creation of various plots from the results of a model fitting.\n\n Args:\n plots (dict[str, bool]): Dictionary defining which plots to make.\n z (list[list[int]] | ~numpy.ndarray[~numpy.ndarray[int]]): List of predicted label masks.\n y (list[list[int]] | ~numpy.ndarray[~numpy.ndarray[int]]): List of corresponding ground truth label masks.\n metrics (dict[str, ~typing.Any]): Optional; Dictionary containing a log of various metrics used to assess\n the performance of a model.\n ids (list[str]): Optional; List of IDs defining the origin of samples to the model.\n Maybe either patch IDs or scene tags.\n mode (str): Optional; Mode samples are from. 
Must be ``'train'``, ``'val'`` or ``'test'``.\n bounds (~numpy.ndarray[~torchgeo.datasets.utils.BoundingBox]): Optional; Array of objects describing\n a geospatial bounding box for each sample.\n Must contain ``minx``, ``maxx``, ``miny`` and ``maxy`` parameters.\n probs (list[float] | ~numpy.ndarray[float]): Optional; Array of probabilistic predicted classes\n from model where each sample should have a list of the predicted probability for each class.\n embeddings (~numpy.ndarray[~typing.Any]): Embeddings from the model to visualise with TSNE clustering.\n class_names (dict[int, str]): Optional; Dictionary mapping class labels to class names.\n colours (dict[int, str]): Optional; Dictionary mapping class labels to colours.\n save (bool): Optional; Save the plots to file.\n show (bool): Optional; Show the plots.\n model_name (str): Optional; Name of model. e.g. MLP-MkVI.\n timestamp (str): Optional; Time and date to be used to identify experiment.\n If not specified, the current date-time is used.\n results_dir (list[str] | str | ~pathlib.Path): Optional; Path to the directory for storing plots.\n\n Notes:\n ``save==True``, ``show==False`` regardless of input for plots made for each sample such as PvT or Mask plots.\n\n Returns:\n None\n \"\"\"\n if not show:\n # Ensures that there is no attempt to display figures incase no display is present.\n try:\n mlp.use(\"agg\")\n except ImportError: # pragma: no cover\n pass\n\n flat_z = None\n flat_y = None\n\n if z is not None:\n flat_z = utils.batch_flatten(z)\n\n if y is not None:\n flat_y = utils.batch_flatten(y)\n\n if timestamp is None:\n timestamp = utils.timestamp_now(fmt=\"%d-%m-%Y_%H%M\")\n\n if model_name is None:\n model_name = CONFIG[\"model_name\"]\n assert model_name is not None\n\n if results_dir is None:\n results_dir = CONFIG[\"dir\"][\"results\"]\n assert isinstance(results_dir, (Sequence, str, Path))\n\n filenames = format_plot_names(model_name, timestamp, results_dir)\n\n try:\n os.mkdir(universal_path(results_dir))\n except FileExistsError as err:\n print(err)\n\n if plots.get(\"History\", False):\n assert metrics is not None\n\n print(\"\\nPLOTTING MODEL HISTORY\")\n plot_history(metrics, filename=filenames[\"History\"], save=save, show=show)\n\n if plots.get(\"CM\", False):\n assert class_names is not None\n assert flat_y is not None\n assert flat_z is not None\n\n print(\"\\nPLOTTING CONFUSION MATRIX\")\n make_confusion_matrix(\n labels=flat_y,\n pred=flat_z,\n classes=class_names,\n filename=filenames[\"CM\"],\n save=save,\n show=show,\n )\n\n if plots.get(\"Pred\", False):\n assert class_names is not None\n assert colours is not None\n assert flat_z is not None\n\n print(\"\\nPLOTTING CLASS DISTRIBUTION OF PREDICTIONS\")\n plot_subpopulations(\n utils.find_modes(flat_z),\n class_names=class_names,\n cmap_dict=colours,\n filename=filenames[\"Pred\"],\n save=save,\n show=show,\n )\n\n if plots.get(\"ROC\", False):\n assert class_names is not None\n assert colours is not None\n assert probs is not None\n assert flat_y is not None\n\n print(\"\\nPLOTTING ROC CURVES\")\n make_roc_curves(\n probs,\n flat_y,\n class_names=class_names,\n colours=colours,\n filename=filenames[\"ROC\"],\n micro=plots[\"micro\"],\n macro=plots[\"macro\"],\n save=save,\n show=show,\n )\n\n if plots.get(\"Mask\", False):\n assert class_names is not None\n assert colours is not None\n assert z is not None\n assert y is not None\n assert ids is not None\n assert bounds is not None\n assert mode is not None\n\n figsize = None\n if DATA_CONFIG is not 
None:\n figsize = DATA_CONFIG[\"fig_sizes\"][\"Mask\"]\n\n flat_bbox = utils.batch_flatten(bounds)\n os.makedirs(universal_path(results_dir) / \"Masks\", exist_ok=True)\n seg_plot(\n z,\n y,\n ids,\n flat_bbox,\n mode,\n fn_prefix=filenames[\"Mask\"],\n classes=class_names,\n colours=colours,\n fig_dim=figsize,\n )\n\n if plots.get(\"TSNE\", False):\n assert embeddings is not None\n assert bounds is not None\n assert mode is not None\n\n print(\"\\nPERFORMING TSNE CLUSTERING\")\n plot_embedding(\n embeddings,\n bounds,\n mode,\n show=show,\n save=save,\n filename=filenames[\"TSNE\"],\n )\n" } } }, @@ -16627,7 +16980,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "4d05bb0d149acc3aed13ea463d2ce5d4c688fc60bcd5c128504d238301e1c16c" + "equalIndicator/v1": "0c15a419b610775db498d2d751673c90a83f4de1a575171d2089981c9b0bc331" }, "properties": { "ideaSeverity": "ERROR" @@ -16645,16 +16998,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/RunTensorBoard.py", + "uri": "minerva/metrics.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 3317, + "charLength": 14337, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to run the TensorBoard logs from experiments.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nfrom typing import List, Optional, Union\n\nfrom minerva.utils import utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(\n path: Optional[Union[str, List[str]]] = None,\n env_name: str = \"env2\",\n exp_name: Optional[str] = None,\n host_num: int = 6006,\n) -> None:\n assert exp_name is not None\n\n if isinstance(path, list):\n if len(path) == 1:\n path = path[0]\n\n utils.run_tensorboard(exp_name, path=path, env_name=env_name, host_num=host_num)\n\n\nif __name__ == \"__main__\":\n CLI = argparse.ArgumentParser()\n CLI.add_argument(\n \"--path\", # name on the CLI - drop the `--` for positional/required parameters\n nargs=\"*\", # 0 or more values expected => creates a list\n type=str,\n default=None, # default if nothing is provided\n )\n CLI.add_argument(\n \"--env_name\",\n nargs=\"1\",\n type=str, # any type/callable can be used here\n default=None,\n )\n CLI.add_argument(\n \"--exp_name\",\n nargs=\"1\",\n type=str, # any type/callable can be used here\n default=None,\n )\n CLI.add_argument(\n \"--host_num\",\n nargs=\"1\",\n type=int, # any type/callable can be used here\n default=None,\n )\n\n args = CLI.parse_args()\n main(\n path=args.path,\n env_name=args.env_name,\n exp_name=args.exp_name,\n host_num=args.host_num,\n )\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
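The `RunTensorBoard.py` snippet captured above wires its CLI with `nargs="1"`, a string, which is not one of argparse's documented `nargs` values (an integer count or `'?'`, `'*'`, `'+'`): help formatting breaks on it, and any value that does parse arrives in `main` as a one-element list rather than a scalar, while `default=None` silently overrides the `"env2"` and `6006` defaults in `main`'s own signature. A minimal corrected sketch of that wiring (an illustration, not the repository's actual fix):

```python
# Sketch: corrected argparse wiring for the RunTensorBoard.py CLI.
# Dropping `nargs` makes each flag yield a scalar, and reusing main()'s
# defaults keeps the CLI and the function signature in agreement.
import argparse

CLI = argparse.ArgumentParser()
CLI.add_argument("--path", nargs="*", type=str, default=None)  # 0+ values -> list
CLI.add_argument("--env_name", type=str, default="env2")
CLI.add_argument("--exp_name", type=str, required=True)  # main() asserts it is set
CLI.add_argument("--host_num", type=int, default=6006)

args = CLI.parse_args()
```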
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to calculate the metrics of a model's fitting.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaMetrics\",\n \"SPMetrics\",\n \"SSLMetrics\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom abc import ABC\nfrom typing import Any, Dict, List, Tuple\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaMetrics(ABC):\n \"\"\"Abstract class for metric logging within the :mod:`minerva` framework.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n metric_types: List[str] = []\n special_metric_types: List[str] = []\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n **params,\n ) -> None:\n super(MinervaMetrics, self).__init__()\n\n self.n_batches = n_batches\n self.batch_size = batch_size\n self.data_size = data_size\n\n self.model_type = params.get(\"model_type\", \"scene_classifier\")\n self.sample_pairs = params.get(\"sample_pairs\", False)\n\n self.modes = params.get(\"modes\", [\"train\", \"val\", \"test\"])\n\n if self.sample_pairs:\n self.metric_types += self.special_metric_types\n\n # Creates a dict to hold the loss and accuracy results from training, validation and testing.\n self.metrics: Dict[str, Any] = {}\n for mode in self.modes:\n for metric in self.metric_types:\n self.metrics[f\"{mode}_{metric}\"] = {\"x\": [], \"y\": []}\n\n def __call__(self, mode: str, logs: Dict[str, Any]) -> None:\n self.calc_metrics(mode, logs)\n\n @abc.abstractmethod\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n pass # pragma: no cover\n\n 
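For reference, the `__init__` shown above keys `self.metrics` by `f"{mode}_{metric}"`, each entry holding parallel `x`/`y` series (epoch numbers against metric values). A sketch of the resulting layout for a hypothetical subclass with `metric_types = ["loss", "acc"]` and the default modes; the numbers are invented:

```python
# Layout of MinervaMetrics.metrics after two training and one validation epoch
# (hypothetical subclass with metric_types = ["loss", "acc"]; values invented).
metrics = {
    "train_loss": {"x": [1, 2], "y": [0.91, 0.64]},  # x: epoch number, y: value
    "train_acc": {"x": [1, 2], "y": [0.48, 0.71]},
    "val_loss": {"x": [1], "y": [0.88]},
    "val_acc": {"x": [1], "y": [0.52]},
    "test_loss": {"x": [], "y": []},  # stays empty until testing runs
    "test_acc": {"x": [], "y": []},
}
```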
@abc.abstractmethod\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n pass # pragma: no cover\n\n @property\n def get_metrics(self) -> Dict[str, Any]:\n \"\"\"Get the ``metrics`` dictionary.\n\n Returns:\n dict[str, Any]: Metrics dictionary.\n \"\"\"\n return self.metrics\n\n def get_sub_metrics(\n self, pattern: Tuple[str, ...] = (\"train\", \"val\")\n ) -> Dict[str, Any]:\n \"\"\"Gets a subset of the metrics dictionary with keys containing strings in the pattern.\n\n Useful for getting the train and validation metrics for plotting for example.\n\n Args:\n pattern (tuple[str, ...]): Optional; Strings to pattern match the metric keys to be returned.\n Defaults to ``(\"train\", \"val\")``.\n\n Returns:\n dict[str, ~typing.Any]: Subset of ``metrics`` with keys that contained strings in ``pattern``.\n \"\"\"\n sub_metrics = {}\n for key in self.metrics.keys():\n if key.split(\"_\")[0] in pattern:\n sub_metrics[key] = self.metrics[key]\n\n return sub_metrics\n\n @abc.abstractmethod\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n pass # pragma: no cover\n\n\nclass SPMetrics(MinervaMetrics):\n \"\"\"Metric logging for supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types: List[str] = [\"loss\", \"acc\", \"miou\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n **params,\n ) -> None:\n super(SPMetrics, self).__init__(\n n_batches, batch_size, data_size, model_type=model_type\n )\n\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n if logs.get(\"total_miou\") is not None:\n self.metrics[f\"{mode}_miou\"][\"y\"].append(\n logs[\"total_miou\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number 
to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_miou\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | Loss: {} | Accuracy: {}%\".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.model_type == \"segmentation\":\n msg += \" | mIoU: {}\".format(self.metrics[f\"{mode}_miou\"][\"y\"][epoch_no])\n\n msg += \"\\n\"\n print(msg)\n\n\nclass SSLMetrics(MinervaMetrics):\n \"\"\"Metric logging for self-supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types = [\"loss\", \"acc\", \"top5_acc\"]\n special_metric_types = [\"collapse_level\", \"euc_dist\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n sample_pairs: bool = False,\n **params,\n ) -> None:\n super(SSLMetrics, self).__init__(\n n_batches,\n batch_size,\n data_size,\n model_type=model_type,\n sample_pairs=sample_pairs,\n )\n\n def calc_metrics(self, mode: str, logs) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"y\"].append(logs[\"collapse_level\"])\n self.metrics[f\"{mode}_euc_dist\"][\"y\"].append(\n logs[\"euc_dist\"] / self.n_batches[mode]\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n 
self.metrics[f\"{mode}_top5_acc\"][\"x\"].append(epoch_no + 1)\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_euc_dist\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | Loss: {} | Accuracy: {}% | Top5 Accuracy: {}% \".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n self.metrics[f\"{mode}_top5_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.sample_pairs and mode == \"train\":\n msg += \"\\n\"\n\n msg += \"| Collapse Level: {}%\".format(\n self.metrics[f\"{mode}_collapse_level\"][\"y\"][epoch_no] * 100.0\n )\n msg += \"| Avg. Euclidean Distance: {}\".format(\n self.metrics[f\"{mode}_euc_dist\"][\"y\"][epoch_no]\n )\n\n msg += \"\\n\"\n print(msg)\n" }, "sourceLanguage": "Python" }, @@ -16662,9 +17015,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 3317, + "charLength": 14337, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to run the TensorBoard logs from experiments.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nfrom typing import List, Optional, Union\n\nfrom minerva.utils import utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(\n path: Optional[Union[str, List[str]]] = None,\n env_name: str = \"env2\",\n exp_name: Optional[str] = None,\n host_num: int = 6006,\n) -> None:\n assert exp_name is not None\n\n if isinstance(path, list):\n if len(path) == 1:\n path = path[0]\n\n utils.run_tensorboard(exp_name, path=path, env_name=env_name, host_num=host_num)\n\n\nif __name__ == \"__main__\":\n CLI = argparse.ArgumentParser()\n CLI.add_argument(\n \"--path\", # name on the CLI - drop the `--` for positional/required parameters\n nargs=\"*\", # 0 or more values expected => creates a list\n type=str,\n default=None, # default if nothing is provided\n )\n CLI.add_argument(\n \"--env_name\",\n nargs=\"1\",\n type=str, # any type/callable can be used here\n default=None,\n )\n CLI.add_argument(\n \"--exp_name\",\n nargs=\"1\",\n type=str, # any type/callable can be used here\n default=None,\n )\n CLI.add_argument(\n \"--host_num\",\n nargs=\"1\",\n type=int, # any type/callable can be used here\n default=None,\n )\n\n args = CLI.parse_args()\n main(\n path=args.path,\n env_name=args.env_name,\n exp_name=args.exp_name,\n host_num=args.host_num,\n )\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
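Tying the `SPMetrics` pieces above together: for a `"segmentation"` model, epoch accuracy is `total_correct` normalised by `n_batches * batch_size * H * W`, i.e. by every pixel seen in the epoch, while mIoU is normalised by the number of samples. A minimal sketch of one epoch of bookkeeping; the sizes and log totals are invented:

```python
# Sketch: one training epoch of SPMetrics bookkeeping (invented numbers).
from minerva.metrics import SPMetrics

metrics = SPMetrics(
    n_batches={"train": 100, "val": 20, "test": 10},
    batch_size=8,
    data_size=(4, 64, 64),  # C x H x W
    model_type="segmentation",
)

# Totals a trainer might accumulate over one epoch (hypothetical values).
logs = {"total_loss": 73.2, "total_correct": 2_621_440.0, "total_miou": 512.0}

metrics.log_epoch_number("train", 0)   # appends epoch 1 to each x-series
metrics("train", logs)                 # __call__ delegates to calc_metrics
metrics.print_epoch_results("train", 0)
# accuracy = 2_621_440 / (100 * 8 * 64 * 64) = 0.8; mIoU = 512 / (100 * 8) = 0.64
```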
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to calculate the metrics of a model's fitting.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaMetrics\",\n \"SPMetrics\",\n \"SSLMetrics\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom abc import ABC\nfrom typing import Any, Dict, List, Tuple\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaMetrics(ABC):\n \"\"\"Abstract class for metric logging within the :mod:`minerva` framework.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n metric_types: List[str] = []\n special_metric_types: List[str] = []\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n **params,\n ) -> None:\n super(MinervaMetrics, self).__init__()\n\n self.n_batches = n_batches\n self.batch_size = batch_size\n self.data_size = data_size\n\n self.model_type = params.get(\"model_type\", \"scene_classifier\")\n self.sample_pairs = params.get(\"sample_pairs\", False)\n\n self.modes = params.get(\"modes\", [\"train\", \"val\", \"test\"])\n\n if self.sample_pairs:\n self.metric_types += self.special_metric_types\n\n # Creates a dict to hold the loss and accuracy results from training, validation and testing.\n self.metrics: Dict[str, Any] = {}\n for mode in self.modes:\n for metric in self.metric_types:\n self.metrics[f\"{mode}_{metric}\"] = {\"x\": [], \"y\": []}\n\n def __call__(self, mode: str, logs: Dict[str, Any]) -> None:\n self.calc_metrics(mode, logs)\n\n @abc.abstractmethod\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n pass # pragma: no cover\n\n 
@abc.abstractmethod\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n pass # pragma: no cover\n\n @property\n def get_metrics(self) -> Dict[str, Any]:\n \"\"\"Get the ``metrics`` dictionary.\n\n Returns:\n dict[str, Any]: Metrics dictionary.\n \"\"\"\n return self.metrics\n\n def get_sub_metrics(\n self, pattern: Tuple[str, ...] = (\"train\", \"val\")\n ) -> Dict[str, Any]:\n \"\"\"Gets a subset of the metrics dictionary with keys containing strings in the pattern.\n\n Useful for getting the train and validation metrics for plotting for example.\n\n Args:\n pattern (tuple[str, ...]): Optional; Strings to pattern match the metric keys to be returned.\n Defaults to ``(\"train\", \"val\")``.\n\n Returns:\n dict[str, ~typing.Any]: Subset of ``metrics`` with keys that contained strings in ``pattern``.\n \"\"\"\n sub_metrics = {}\n for key in self.metrics.keys():\n if key.split(\"_\")[0] in pattern:\n sub_metrics[key] = self.metrics[key]\n\n return sub_metrics\n\n @abc.abstractmethod\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n pass # pragma: no cover\n\n\nclass SPMetrics(MinervaMetrics):\n \"\"\"Metric logging for supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types: List[str] = [\"loss\", \"acc\", \"miou\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n **params,\n ) -> None:\n super(SPMetrics, self).__init__(\n n_batches, batch_size, data_size, model_type=model_type\n )\n\n def calc_metrics(self, mode: str, logs: Dict[str, Any]) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n if logs.get(\"total_miou\") is not None:\n self.metrics[f\"{mode}_miou\"][\"y\"].append(\n logs[\"total_miou\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number 
to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_miou\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | Loss: {} | Accuracy: {}%\".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.model_type == \"segmentation\":\n msg += \" | mIoU: {}\".format(self.metrics[f\"{mode}_miou\"][\"y\"][epoch_no])\n\n msg += \"\\n\"\n print(msg)\n\n\nclass SSLMetrics(MinervaMetrics):\n \"\"\"Metric logging for self-supervised models.\n\n Attributes:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n metrics (dict[str, ~typing.Any]): Dictionary to hold the metrics to assess the model with\n for each mode of fitting.\n model_type (str): Type of the model.\n\n Args:\n n_batches (dict[str, int]): Dictionary of the number of batches in each mode of fitting.\n batch_size (int): Batch size.\n data_size (tuple[int, int, int]): Shape of the input data in ``C x H x W``.\n model_type (str): Optional; Type of the model.\n \"\"\"\n\n metric_types = [\"loss\", \"acc\", \"top5_acc\"]\n special_metric_types = [\"collapse_level\", \"euc_dist\"]\n\n def __init__(\n self,\n n_batches: Dict[str, int],\n batch_size: int,\n data_size: Tuple[int, int, int],\n model_type: str = \"segmentation\",\n sample_pairs: bool = False,\n **params,\n ) -> None:\n super(SSLMetrics, self).__init__(\n n_batches,\n batch_size,\n data_size,\n model_type=model_type,\n sample_pairs=sample_pairs,\n )\n\n def calc_metrics(self, mode: str, logs) -> None:\n \"\"\"Updates metrics with epoch results.\n\n Args:\n mode (str): Mode of model fitting.\n logs (dict[str, ~typing.Any]): Logs of the results from the epoch of fitting to calculate metrics from.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"y\"].append(\n logs[\"total_loss\"] / self.n_batches[mode]\n )\n\n if self.model_type == \"segmentation\":\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"]\n / (\n self.n_batches[mode]\n * self.batch_size\n * self.data_size[1]\n * self.data_size[2]\n )\n )\n\n else:\n self.metrics[f\"{mode}_acc\"][\"y\"].append(\n logs[\"total_correct\"] / (self.n_batches[mode] * self.batch_size)\n )\n self.metrics[f\"{mode}_top5_acc\"][\"y\"].append(\n logs[\"total_top5\"] / (self.n_batches[mode] * self.batch_size)\n )\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"y\"].append(logs[\"collapse_level\"])\n self.metrics[f\"{mode}_euc_dist\"][\"y\"].append(\n logs[\"euc_dist\"] / self.n_batches[mode]\n )\n\n def log_epoch_number(self, mode: str, epoch_no: int) -> None:\n \"\"\"Logs the epoch number to ``metrics``.\n\n Args:\n mode (str): Mode of model fitting.\n epoch_no (int): Epoch number to log.\n \"\"\"\n self.metrics[f\"{mode}_loss\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_acc\"][\"x\"].append(epoch_no + 1)\n 
self.metrics[f\"{mode}_top5_acc\"][\"x\"].append(epoch_no + 1)\n\n if self.sample_pairs and mode == \"train\":\n self.metrics[f\"{mode}_collapse_level\"][\"x\"].append(epoch_no + 1)\n self.metrics[f\"{mode}_euc_dist\"][\"x\"].append(epoch_no + 1)\n\n def print_epoch_results(self, mode: str, epoch_no: int) -> None:\n \"\"\"Prints the results from an epoch to ``stdout``.\n\n Args:\n mode (str): Mode of fitting to print results from.\n epoch_no (int): Epoch number to print results from.\n \"\"\"\n msg = \"{} | Loss: {} | Accuracy: {}% | Top5 Accuracy: {}% \".format(\n mode,\n self.metrics[f\"{mode}_loss\"][\"y\"][epoch_no],\n self.metrics[f\"{mode}_acc\"][\"y\"][epoch_no] * 100.0,\n self.metrics[f\"{mode}_top5_acc\"][\"y\"][epoch_no] * 100.0,\n )\n\n if self.sample_pairs and mode == \"train\":\n msg += \"\\n\"\n\n msg += \"| Collapse Level: {}%\".format(\n self.metrics[f\"{mode}_collapse_level\"][\"y\"][epoch_no] * 100.0\n )\n msg += \"| Avg. Euclidean Distance: {}\".format(\n self.metrics[f\"{mode}_euc_dist\"][\"y\"][epoch_no]\n )\n\n msg += \"\\n\"\n print(msg)\n" } } }, @@ -16677,7 +17030,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "e2400a15b2b56555689b52801b7beec910fcaa931c726cba4b746a14b6f76727" + "equalIndicator/v1": "ccc2230d965b84f9687767bc636766d7efe6857ead043a701f6529f82dff5351" }, "properties": { "ideaSeverity": "ERROR" @@ -16695,16 +17048,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/TorchWeightDownloader.py", + "uri": "minerva/models/core.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 2280, + "charLength": 18297, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see <https://www.gnu.org/licenses/>.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Loads :mod:`torch` weights from Torch Hub into cache.\n\nAttributes:\n resnets (List[str]): List of tags for ``pytorch`` resnet weights to download.\n\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Optional\n\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.models import get_torch_weights\n\nresnets = [\n \"ResNet101_Weights.IMAGENET1K_V1\",\n \"ResNet152_Weights.IMAGENET1K_V1\",\n \"ResNet18_Weights.IMAGENET1K_V1\",\n \"ResNet34_Weights.IMAGENET1K_V1\",\n \"ResNet50_Weights.IMAGENET1K_V1\",\n]\n\n\ndef main() -> None:\n for resnet in resnets:\n weights: Optional[WeightsEnum] = get_torch_weights(resnet)\n assert weights\n _ = weights.get_state_dict(True)\n\n\nif __name__ == \"__main__\":\n main()\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
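The `TorchWeightDownloader.py` script above exists purely to warm the Torch Hub cache; the tags in `resnets` are standard `torchvision` weight enums, so once `main()` has run they can be consumed without further network access. An illustrative (non-repository) sketch of building a backbone from one of the listed tags:

```python
# Sketch: consume one of the pre-cached weight tags via plain torchvision.
from torchvision.models import ResNet18_Weights, resnet18

weights = ResNet18_Weights.IMAGENET1K_V1  # same tag as in `resnets`
model = resnet18(weights=weights)  # state dict is read from the warmed cache
model.eval()
```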
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing core utility functions and abstract classes for :mod:`models`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n__all__ = [\n \"MinervaModel\",\n \"MinervaWrapper\",\n \"MinervaDataParallel\",\n \"MinervaBackbone\",\n \"MinervaOnnxModel\",\n \"get_model\",\n \"get_torch_weights\",\n \"get_output_shape\",\n \"bilinear_init\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nimport os\nfrom abc import ABC\nfrom pathlib import Path\nfrom typing import (\n Any,\n Callable,\n Iterable,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n overload,\n)\n\nimport numpy as np\nimport torch\nfrom nptyping import NDArray\nfrom torch import Tensor\nfrom torch.nn.modules import Module\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.optim import Optimizer\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.utils.utils import func_by_str\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaModel(Module, ABC):\n \"\"\"Abstract class to act as a base for all Minerva Models.\n\n Designed to provide inter-compatability with :class:`~trainer.Trainer`.\n\n Attributes:\n criterion (~torch.nn.Module): :mod:`torch` loss function model will use.\n input_shape (tuple[int, ...]): Optional; Defines the shape of the input data. Typically in order of\n number of channels, image width, image height but may vary dependant on model specs.\n n_classes (int): Number of classes in input data.\n output_shape: The shape of the output of the network. Determined and set by :meth:`determine_output_dim`.\n optimiser: :mod:`torch` optimiser model will use, to be initialised with inherited model's parameters.\n\n Args:\n criterion (~torch.nn.Module): Optional; :mod:`torch` loss function model will use.\n input_shape (tuple[int, ...]): Optional; Defines the shape of the input data. 
Typically in order of\n number of channels, image width, image height but may vary dependant on model specs.\n n_classes (int): Optional; Number of classes in input data.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(\n self,\n criterion: Optional[Module] = None,\n input_size: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n ) -> None:\n super(MinervaModel, self).__init__()\n\n # Sets loss function\n self.criterion: Optional[Module] = criterion\n\n self.input_size = input_size\n self.n_classes = n_classes\n\n # Output shape initialised as None. Should be set by calling determine_output_dim.\n self.output_shape: Optional[Union[int, Iterable[int]]] = None\n\n # Optimiser initialised as None as the model parameters created by its init is required to init a\n # torch optimiser. The optimiser MUST be set by calling set_optimiser before the model can be trained.\n self.optimiser: Optional[Optimizer] = None\n\n def set_optimiser(self, optimiser: Optimizer) -> None:\n \"\"\"Sets the optimiser used by the model.\n\n .. warning::\n *MUST* be called after initialising a model and supplied with a :class:`torch.optim.Optimizer`\n using this model's parameters.\n\n Args:\n optimiser (~torch.optim.Optimizer): :class:`torch.optim.Optimizer` model will use,\n initialised with this model's parameters.\n \"\"\"\n self.optimiser = optimiser\n\n def determine_output_dim(self, sample_pairs: bool = False) -> None:\n \"\"\"Uses :func:`get_output_shape` to find the dimensions of the output of this model and sets to attribute.\"\"\"\n\n assert self.input_size is not None\n\n self.output_shape = get_output_shape(\n self, self.input_size, sample_pairs=sample_pairs\n )\n\n @overload\n def step(\n self, x: Tensor, y: Tensor, train: bool = False\n ) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]]]:\n ... # pragma: no cover\n\n @overload\n def step(\n self, x: Tensor, *, train: bool = False\n ) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]]]:\n ... # pragma: no cover\n\n def step(\n self,\n x: Tensor,\n y: Optional[Tensor] = None,\n train: bool = False,\n ) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]]]:\n \"\"\"Generic step of model fitting using a batch of data.\n\n Raises:\n NotImplementedError: If :attr:`~MinervaModel.optimiser` is ``None``.\n NotImplementedError: If :attr:`~MinervaModel.criterion` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n y (~torch.Tensor): Either a batch of ground truth labels or generated labels/ pairs.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step\n which will then clear the :attr:`~MinervaModel.optimiser`, and perform a backward pass of the\n network then update the :attr:`~MinervaModel.optimiser`. 
If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor | tuple[~torch.Tensor, ...]]: :class:`tuple` of the loss computed\n by the loss function and the model outputs.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n if self.criterion is None:\n raise NotImplementedError(\"Criterion has not been set!\")\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n z: Union[Tensor, Tuple[Tensor, ...]] = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = self.criterion(z, y)\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, z\n\n\nclass MinervaWrapper(MinervaModel):\n \"\"\"Wraps a :mod:`torch` model class in :class:`MinervaModel` so it can be used in :mod:`minerva`.\n\n Attributes:\n model (~torch.nn.Module): The wrapped :mod:`torch` model that is now compatible with :mod:`minerva`.\n\n Args:\n model_cls (~typing.Callable[..., ~torch.nn.Module]): The :mod:`torch` model class to wrap, initialise\n and place in :attr:`~MinervaWrapper.model`.\n criterion (~torch.nn.Module): Optional; :mod:`torch` loss function model will use.\n input_shape (tuple[int, ...]): Optional; Defines the shape of the input data. Typically in order of\n number of channels, image width, image height but may vary dependant on model specs.\n n_classes (int): Optional; Number of classes in input data.\n\n \"\"\"\n\n def __init__(\n self,\n model_cls: Callable[..., Module],\n criterion: Optional[Module] = None,\n input_size: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n *args,\n **kwargs,\n ) -> None:\n super().__init__(criterion, input_size, n_classes)\n\n self.model = model_cls(*args, **kwargs)\n\n def __call__(self, *inputs) -> Any:\n return self.forward(*inputs)\n\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model, name)\n\n def __repr__(self) -> Any:\n return self.model.__repr__()\n\n def forward(self, *inputs) -> Any:\n return self.model.forward(*inputs)\n\n\nclass MinervaBackbone(MinervaModel):\n \"\"\"Abstract class to mark a model for use as a backbone.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.backbone: MinervaModel\n\n def get_backbone(self) -> Module:\n \"\"\"Gets the :attr:`~MinervaBackbone.backbone` network of the model.\n\n Returns:\n ~torch.nn.Module: The :attr:`~MinervaModel.backbone` of the model.\n \"\"\"\n return self.backbone\n\n\nclass MinervaDataParallel(Module): # pragma: no cover\n \"\"\"Wrapper for :class:`~torch.nn.parallel.data_parallel.DataParallel` or\n :class:`~torch.nn.parallel.DistributedDataParallel` that automatically fetches the\n attributes of the wrapped model.\n\n Attributes:\n model (~torch.nn.Module): :mod:`torch` model to be wrapped by\n :class:`~torch.nn.parallel.data_parallel.DataParallel` or\n :class:`~torch.nn.parallel.DistributedDataParallel`.\n paralleliser (~torch.nn.parallel.data_parallel.DataParallel | ~torch.nn.parallel.DistributedDataParallel):\n The paralleliser to wrap the :attr:`~MinervaDataParallel.model` in.\n\n Args:\n model (~torch.nn.Module): :mod:`torch` model to be wrapped by\n :class:`~torch.nn.parallel.data_parallel.DataParallel` or\n :class:`~torch.nn.parallel.DistributedDataParallel`.\n \"\"\"\n\n 
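`MinervaModel.step` above bundles the usual zero-grad, forward, loss, backward, optimiser-step cycle, and raises until both a criterion and an optimiser built from the model's own parameters have been attached through `set_optimiser`. A minimal concrete subclass as a sketch; the class, sizes and data are invented for illustration:

```python
# Sketch: the smallest MinervaModel subclass able to take a training step.
import torch
from torch import nn

from minerva.models import MinervaModel


class TinyClassifier(MinervaModel):
    """Invented single-layer classifier, purely for illustration."""

    def __init__(self, criterion: nn.Module, n_classes: int = 8) -> None:
        super().__init__(criterion=criterion, input_size=(16,), n_classes=n_classes)
        self.fc = nn.Linear(16, n_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.fc(x)


model = TinyClassifier(criterion=nn.CrossEntropyLoss())
# The optimiser MUST be built from this model's parameters, then attached.
model.set_optimiser(torch.optim.SGD(model.parameters(), lr=0.01))

x, y = torch.rand(4, 16), torch.randint(0, 8, (4,))
loss, z = model.step(x, y, train=True)  # zero_grad, forward, loss, backward, step
```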
def __init__(\n self,\n model: Module,\n paralleliser: Union[Type[DataParallel], Type[DistributedDataParallel]],\n *args,\n **kwargs,\n ) -> None:\n super(MinervaDataParallel, self).__init__()\n self.model = paralleliser(model, *args, **kwargs).cuda()\n\n def forward(self, *inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:\n \"\"\"Ensures a forward call to the model goes to the actual wrapped model.\n\n Args:\n inputs (tuple[~torch.Tensor, ...]): Input of tensors to be parsed to the\n :attr:`~MinervaDataParallel.model` forward.\n\n Returns:\n tuple[~torch.Tensor, ...]: Output of :attr:`~MinervaDataParallel.model`.\n \"\"\"\n z = self.model(*inputs)\n assert isinstance(z, tuple) and list(map(type, z)) == [Tensor] * len(z)\n return z\n\n def __call__(self, *inputs) -> Tuple[Tensor, ...]:\n return self.forward(*inputs)\n\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model.module, name)\n\n def __repr__(self) -> Any:\n return self.model.__repr__()\n\n\nclass MinervaOnnxModel(MinervaModel):\n \"\"\"Special model class for enabling :mod:`onnx` models to be used within :mod:`minerva`.\n\n Attributes:\n model (~torch.nn.Module): :mod:`onnx` model imported into :mod:`torch`.\n\n Args:\n model (~torch.nn.Module): :mod:`onnx` model imported into :mod:`torch`.\n \"\"\"\n\n def __init__(self, model: Module, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.model = model\n\n def __call__(self, *inputs) -> Any:\n return self.model.forward(*inputs)\n\n def __getattr__(self, name) -> Any:\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model, name)\n\n def __repr__(self) -> Any:\n return self.model.__repr__()\n\n def forward(self, *inputs: Any) -> Any:\n \"\"\"Performs a forward pass of the :attr:`~MinervaOnnxModel.model` within.\n\n Args:\n inputs (~typing.Any): Input to be parsed to the ``.forward`` method of :attr:`~MinervaOnnxModel.model`.\n\n Returns:\n ~typing.Any: Output of :attr:`~MinervaOnnxModel.model`.\n \"\"\"\n return self.model.forward(*inputs)\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef get_model(model_name: str) -> Callable[..., MinervaModel]:\n \"\"\"Returns the constructor of the ``model_name`` in :mod:`models`.\n\n Args:\n model_name (str): Name of the model to get.\n\n Returns:\n ~typing.Callable[..., MinervaModel]: Constructor of the model requested.\n \"\"\"\n model: Callable[..., MinervaModel] = func_by_str(\"minerva.models\", model_name)\n return model\n\n\ndef get_torch_weights(weights_name: str) -> Optional[WeightsEnum]:\n \"\"\"Loads pre-trained model weights from :mod:`torchvision` via Torch Hub API.\n\n Args:\n weights_name (str): Name of model weights. See\n https://pytorch.org/vision/stable/models.html#table-of-all-available-classification-weights\n for a list of possible pre-trained weights.\n\n Returns:\n torchvision.models._api.WeightsEnum | None: API query for the specified weights.\n ``None`` if query cannot be found. See note on use:\n\n Note:\n This function only returns a query for the API of the weights. 
To actually use them, you need to call\n :meth:`~torchvision.models._api.WeightsEnum.get_state_dict` to download the weights (if not already in cache).\n \"\"\"\n weights: Optional[WeightsEnum] = None\n try:\n weights = torch.hub.load(\"pytorch/vision\", \"get_weight\", name=weights_name)\n except OSError: # pragma: no cover\n th_dir = os.environ.get(\"TORCH_HUB\", Path(\"~/.cache/torch/hub\").expanduser())\n try:\n weights = torch.hub.load(\n f\"{th_dir}/pytorch_vision_main\",\n \"get_weight\",\n name=weights_name,\n source=\"local\",\n )\n except FileNotFoundError as err: # pragma: no cover\n print(err)\n weights = None\n\n return weights\n\n\ndef get_output_shape(\n model: Module,\n image_dim: Union[Sequence[int], int],\n sample_pairs: bool = False,\n) -> Union[int, Sequence[int]]:\n \"\"\"Gets the output shape of a model.\n\n Args:\n model (~torch.nn.Module): Model for which the shape of the output needs to be found.\n image_dim (~typing.Sequence[int] | int]): Expected shape of the input data to the model.\n sample_pairs (bool): Optional; Flag for if paired sampling is active.\n Will send a paired sample through the model.\n\n Returns:\n int | ~typing.Sequence[int]: The shape of the output data from the model.\n \"\"\"\n _image_dim: Union[Sequence[int], int] = image_dim\n try:\n assert not isinstance(image_dim, int)\n if len(image_dim) == 1:\n _image_dim = image_dim[0]\n except (AssertionError, TypeError):\n if not hasattr(image_dim, \"__len__\"):\n pass\n\n if not hasattr(_image_dim, \"__len__\"):\n assert isinstance(_image_dim, int)\n random_input = torch.rand([4, _image_dim])\n elif sample_pairs:\n assert isinstance(_image_dim, Iterable)\n random_input = torch.rand([2, 4, *_image_dim])\n else:\n assert isinstance(_image_dim, Iterable)\n random_input = torch.rand([4, *_image_dim])\n\n output: Tensor = model(random_input)\n\n if len(output[0].data.shape) == 1:\n return output[0].data.shape[0]\n\n else:\n return output[0].data.shape[1:]\n\n\ndef bilinear_init(in_channels: int, out_channels: int, kernel_size: int) -> Tensor:\n \"\"\"Constructs the weights for the bi-linear interpolation kernel for use in transpose convolutional layers.\n\n Source: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Args:\n in_channels (int): Number of input channels to the layer.\n out_channels (int): Number of output channels from the layer.\n kernel_size (int): Size of the (square) kernel.\n\n Returns:\n ~torch.Tensor: :class:`~torch.Tensor` of the initialised bi-linear interpolated weights for the\n transpose convolutional layer's kernels.\n \"\"\"\n factor = (kernel_size + 1) // 2\n\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = int(factor - 0.5)\n\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n weight: NDArray[Any, Any] = np.zeros(\n (in_channels, out_channels, kernel_size, kernel_size), dtype=\"float32\"\n )\n weight[range(in_channels), range(out_channels), :, :] = filt\n\n weights = torch.from_numpy(weight) # type: ignore[attr-defined]\n assert isinstance(weights, Tensor)\n return weights\n" }, "sourceLanguage": "Python" }, @@ -16712,9 +17065,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 2280, + "charLength": 18297, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published 
by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Loads :mod:`torch` weights from Torch Hub into cache.\n\nAttributes:\n resnets (List[str]): List of tags for ``pytorch`` resnet weights to download.\n\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Optional\n\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.models import get_torch_weights\n\nresnets = [\n \"ResNet101_Weights.IMAGENET1K_V1\",\n \"ResNet152_Weights.IMAGENET1K_V1\",\n \"ResNet18_Weights.IMAGENET1K_V1\",\n \"ResNet34_Weights.IMAGENET1K_V1\",\n \"ResNet50_Weights.IMAGENET1K_V1\",\n]\n\n\ndef main() -> None:\n for resnet in resnets:\n weights: Optional[WeightsEnum] = get_torch_weights(resnet)\n assert weights\n _ = weights.get_state_dict(True)\n\n\nif __name__ == \"__main__\":\n main()\n" + "text": "# -*- coding: utf-8 -*-\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing core utility functions and abstract classes for :mod:`models`.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n__all__ = [\n \"MinervaModel\",\n \"MinervaWrapper\",\n \"MinervaDataParallel\",\n \"MinervaBackbone\",\n \"MinervaOnnxModel\",\n \"get_model\",\n \"get_torch_weights\",\n \"get_output_shape\",\n \"bilinear_init\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nimport os\nfrom abc import ABC\nfrom pathlib import Path\nfrom typing import (\n Any,\n Callable,\n Iterable,\n Optional,\n Sequence,\n Tuple,\n Type,\n Union,\n overload,\n)\n\nimport numpy as np\nimport torch\nfrom nptyping import NDArray\nfrom torch import Tensor\nfrom torch.nn.modules import Module\nfrom torch.nn.parallel import DataParallel, DistributedDataParallel\nfrom torch.optim import Optimizer\nfrom torchvision.models._api import WeightsEnum\n\nfrom minerva.utils.utils import func_by_str\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaModel(Module, ABC):\n \"\"\"Abstract class to act as a base for all Minerva Models.\n\n Designed to provide inter-compatability with :class:`~trainer.Trainer`.\n\n Attributes:\n criterion (~torch.nn.Module): :mod:`torch` loss function model will use.\n input_shape (tuple[int, ...]): Optional; Defines the shape of the input data. Typically in order of\n number of channels, image width, image height but may vary dependant on model specs.\n n_classes (int): Number of classes in input data.\n output_shape: The shape of the output of the network. Determined and set by :meth:`determine_output_dim`.\n optimiser: :mod:`torch` optimiser model will use, to be initialised with inherited model's parameters.\n\n Args:\n criterion (~torch.nn.Module): Optional; :mod:`torch` loss function model will use.\n input_shape (tuple[int, ...]): Optional; Defines the shape of the input data. 
Typically in order of\n number of channels, image width, image height but may vary depending on model specs.\n n_classes (int): Optional; Number of classes in input data.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(\n self,\n criterion: Optional[Module] = None,\n input_size: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n ) -> None:\n super(MinervaModel, self).__init__()\n\n # Sets loss function\n self.criterion: Optional[Module] = criterion\n\n self.input_size = input_size\n self.n_classes = n_classes\n\n # Output shape initialised as None. Should be set by calling determine_output_dim.\n self.output_shape: Optional[Union[int, Iterable[int]]] = None\n\n # Optimiser initialised as None as the model parameters created by its init is required to init a\n # torch optimiser. The optimiser MUST be set by calling set_optimiser before the model can be trained.\n self.optimiser: Optional[Optimizer] = None\n\n def set_optimiser(self, optimiser: Optimizer) -> None:\n \"\"\"Sets the optimiser used by the model.\n\n .. warning::\n *MUST* be called after initialising a model and supplied with a :class:`torch.optim.Optimizer`\n using this model's parameters.\n\n Args:\n optimiser (~torch.optim.Optimizer): :class:`torch.optim.Optimizer` model will use,\n initialised with this model's parameters.\n \"\"\"\n self.optimiser = optimiser\n\n def determine_output_dim(self, sample_pairs: bool = False) -> None:\n \"\"\"Uses :func:`get_output_shape` to find the dimensions of the output of this model and sets to attribute.\"\"\"\n\n assert self.input_size is not None\n\n self.output_shape = get_output_shape(\n self, self.input_size, sample_pairs=sample_pairs\n )\n\n @overload\n def step(\n self, x: Tensor, y: Tensor, train: bool = False\n ) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]]]:\n ... # pragma: no cover\n\n @overload\n def step(\n self, x: Tensor, *, train: bool = False\n ) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]]]:\n ... # pragma: no cover\n\n def step(\n self,\n x: Tensor,\n y: Optional[Tensor] = None,\n train: bool = False,\n ) -> Tuple[Tensor, Union[Tensor, Tuple[Tensor, ...]]]:\n \"\"\"Generic step of model fitting using a batch of data.\n\n Raises:\n NotImplementedError: If :attr:`~MinervaModel.optimiser` is ``None``.\n NotImplementedError: If :attr:`~MinervaModel.criterion` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n y (~torch.Tensor): Either a batch of ground truth labels or generated labels/pairs.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step\n which will then clear the :attr:`~MinervaModel.optimiser`, and perform a backward pass of the\n network then update the :attr:`~MinervaModel.optimiser`. 
If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor | tuple[~torch.Tensor, ...]]: :class:`tuple` of the loss computed\n by the loss function and the model outputs.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n if self.criterion is None:\n raise NotImplementedError(\"Criterion has not been set!\")\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n z: Union[Tensor, Tuple[Tensor, ...]] = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = self.criterion(z, y)\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, z\n\n\nclass MinervaWrapper(MinervaModel):\n \"\"\"Wraps a :mod:`torch` model class in :class:`MinervaModel` so it can be used in :mod:`minerva`.\n\n Attributes:\n model (~torch.nn.Module): The wrapped :mod:`torch` model that is now compatible with :mod:`minerva`.\n\n Args:\n model_cls (~typing.Callable[..., ~torch.nn.Module]): The :mod:`torch` model class to wrap, initialise\n and place in :attr:`~MinervaWrapper.model`.\n criterion (~torch.nn.Module): Optional; :mod:`torch` loss function model will use.\n input_shape (tuple[int, ...]): Optional; Defines the shape of the input data. Typically in order of\n number of channels, image width, image height but may vary dependant on model specs.\n n_classes (int): Optional; Number of classes in input data.\n\n \"\"\"\n\n def __init__(\n self,\n model_cls: Callable[..., Module],\n criterion: Optional[Module] = None,\n input_size: Optional[Tuple[int, ...]] = None,\n n_classes: Optional[int] = None,\n *args,\n **kwargs,\n ) -> None:\n super().__init__(criterion, input_size, n_classes)\n\n self.model = model_cls(*args, **kwargs)\n\n def __call__(self, *inputs) -> Any:\n return self.forward(*inputs)\n\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model, name)\n\n def __repr__(self) -> Any:\n return self.model.__repr__()\n\n def forward(self, *inputs) -> Any:\n return self.model.forward(*inputs)\n\n\nclass MinervaBackbone(MinervaModel):\n \"\"\"Abstract class to mark a model for use as a backbone.\"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.backbone: MinervaModel\n\n def get_backbone(self) -> Module:\n \"\"\"Gets the :attr:`~MinervaBackbone.backbone` network of the model.\n\n Returns:\n ~torch.nn.Module: The :attr:`~MinervaModel.backbone` of the model.\n \"\"\"\n return self.backbone\n\n\nclass MinervaDataParallel(Module): # pragma: no cover\n \"\"\"Wrapper for :class:`~torch.nn.parallel.data_parallel.DataParallel` or\n :class:`~torch.nn.parallel.DistributedDataParallel` that automatically fetches the\n attributes of the wrapped model.\n\n Attributes:\n model (~torch.nn.Module): :mod:`torch` model to be wrapped by\n :class:`~torch.nn.parallel.data_parallel.DataParallel` or\n :class:`~torch.nn.parallel.DistributedDataParallel`.\n paralleliser (~torch.nn.parallel.data_parallel.DataParallel | ~torch.nn.parallel.DistributedDataParallel):\n The paralleliser to wrap the :attr:`~MinervaDataParallel.model` in.\n\n Args:\n model (~torch.nn.Module): :mod:`torch` model to be wrapped by\n :class:`~torch.nn.parallel.data_parallel.DataParallel` or\n :class:`~torch.nn.parallel.DistributedDataParallel`.\n \"\"\"\n\n 
def __init__(\n self,\n model: Module,\n paralleliser: Union[Type[DataParallel], Type[DistributedDataParallel]],\n *args,\n **kwargs,\n ) -> None:\n super(MinervaDataParallel, self).__init__()\n self.model = paralleliser(model, *args, **kwargs).cuda()\n\n def forward(self, *inputs: Tuple[Tensor, ...]) -> Tuple[Tensor, ...]:\n \"\"\"Ensures a forward call to the model goes to the actual wrapped model.\n\n Args:\n inputs (tuple[~torch.Tensor, ...]): Input of tensors to be parsed to the\n :attr:`~MinervaDataParallel.model` forward.\n\n Returns:\n tuple[~torch.Tensor, ...]: Output of :attr:`~MinervaDataParallel.model`.\n \"\"\"\n z = self.model(*inputs)\n assert isinstance(z, tuple) and list(map(type, z)) == [Tensor] * len(z)\n return z\n\n def __call__(self, *inputs) -> Tuple[Tensor, ...]:\n return self.forward(*inputs)\n\n def __getattr__(self, name):\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model.module, name)\n\n def __repr__(self) -> Any:\n return self.model.__repr__()\n\n\nclass MinervaOnnxModel(MinervaModel):\n \"\"\"Special model class for enabling :mod:`onnx` models to be used within :mod:`minerva`.\n\n Attributes:\n model (~torch.nn.Module): :mod:`onnx` model imported into :mod:`torch`.\n\n Args:\n model (~torch.nn.Module): :mod:`onnx` model imported into :mod:`torch`.\n \"\"\"\n\n def __init__(self, model: Module, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.model = model\n\n def __call__(self, *inputs) -> Any:\n return self.model.forward(*inputs)\n\n def __getattr__(self, name) -> Any:\n try:\n return super().__getattr__(name)\n except AttributeError:\n return getattr(self.model, name)\n\n def __repr__(self) -> Any:\n return self.model.__repr__()\n\n def forward(self, *inputs: Any) -> Any:\n \"\"\"Performs a forward pass of the :attr:`~MinervaOnnxModel.model` within.\n\n Args:\n inputs (~typing.Any): Input to be parsed to the ``.forward`` method of :attr:`~MinervaOnnxModel.model`.\n\n Returns:\n ~typing.Any: Output of :attr:`~MinervaOnnxModel.model`.\n \"\"\"\n return self.model.forward(*inputs)\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef get_model(model_name: str) -> Callable[..., MinervaModel]:\n \"\"\"Returns the constructor of the ``model_name`` in :mod:`models`.\n\n Args:\n model_name (str): Name of the model to get.\n\n Returns:\n ~typing.Callable[..., MinervaModel]: Constructor of the model requested.\n \"\"\"\n model: Callable[..., MinervaModel] = func_by_str(\"minerva.models\", model_name)\n return model\n\n\ndef get_torch_weights(weights_name: str) -> Optional[WeightsEnum]:\n \"\"\"Loads pre-trained model weights from :mod:`torchvision` via Torch Hub API.\n\n Args:\n weights_name (str): Name of model weights. See\n https://pytorch.org/vision/stable/models.html#table-of-all-available-classification-weights\n for a list of possible pre-trained weights.\n\n Returns:\n torchvision.models._api.WeightsEnum | None: API query for the specified weights.\n ``None`` if query cannot be found. See note on use:\n\n Note:\n This function only returns a query for the API of the weights. 
To actually use them, you need to call\n :meth:`~torchvision.models._api.WeightsEnum.get_state_dict` to download the weights (if not already in cache).\n \"\"\"\n weights: Optional[WeightsEnum] = None\n try:\n weights = torch.hub.load(\"pytorch/vision\", \"get_weight\", name=weights_name)\n except OSError: # pragma: no cover\n th_dir = os.environ.get(\"TORCH_HUB\", Path(\"~/.cache/torch/hub\").expanduser())\n try:\n weights = torch.hub.load(\n f\"{th_dir}/pytorch_vision_main\",\n \"get_weight\",\n name=weights_name,\n source=\"local\",\n )\n except FileNotFoundError as err: # pragma: no cover\n print(err)\n weights = None\n\n return weights\n\n\ndef get_output_shape(\n model: Module,\n image_dim: Union[Sequence[int], int],\n sample_pairs: bool = False,\n) -> Union[int, Sequence[int]]:\n \"\"\"Gets the output shape of a model.\n\n Args:\n model (~torch.nn.Module): Model for which the shape of the output needs to be found.\n image_dim (~typing.Sequence[int] | int]): Expected shape of the input data to the model.\n sample_pairs (bool): Optional; Flag for if paired sampling is active.\n Will send a paired sample through the model.\n\n Returns:\n int | ~typing.Sequence[int]: The shape of the output data from the model.\n \"\"\"\n _image_dim: Union[Sequence[int], int] = image_dim\n try:\n assert not isinstance(image_dim, int)\n if len(image_dim) == 1:\n _image_dim = image_dim[0]\n except (AssertionError, TypeError):\n if not hasattr(image_dim, \"__len__\"):\n pass\n\n if not hasattr(_image_dim, \"__len__\"):\n assert isinstance(_image_dim, int)\n random_input = torch.rand([4, _image_dim])\n elif sample_pairs:\n assert isinstance(_image_dim, Iterable)\n random_input = torch.rand([2, 4, *_image_dim])\n else:\n assert isinstance(_image_dim, Iterable)\n random_input = torch.rand([4, *_image_dim])\n\n output: Tensor = model(random_input)\n\n if len(output[0].data.shape) == 1:\n return output[0].data.shape[0]\n\n else:\n return output[0].data.shape[1:]\n\n\ndef bilinear_init(in_channels: int, out_channels: int, kernel_size: int) -> Tensor:\n \"\"\"Constructs the weights for the bi-linear interpolation kernel for use in transpose convolutional layers.\n\n Source: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Args:\n in_channels (int): Number of input channels to the layer.\n out_channels (int): Number of output channels from the layer.\n kernel_size (int): Size of the (square) kernel.\n\n Returns:\n ~torch.Tensor: :class:`~torch.Tensor` of the initialised bi-linear interpolated weights for the\n transpose convolutional layer's kernels.\n \"\"\"\n factor = (kernel_size + 1) // 2\n\n if kernel_size % 2 == 1:\n center = factor - 1\n else:\n center = int(factor - 0.5)\n\n og = np.ogrid[:kernel_size, :kernel_size]\n filt = (1 - abs(og[0] - center) / factor) * (1 - abs(og[1] - center) / factor)\n weight: NDArray[Any, Any] = np.zeros(\n (in_channels, out_channels, kernel_size, kernel_size), dtype=\"float32\"\n )\n weight[range(in_channels), range(out_channels), :, :] = filt\n\n weights = torch.from_numpy(weight) # type: ignore[attr-defined]\n assert isinstance(weights, Tensor)\n return weights\n" } } }, @@ -16727,7 +17080,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "bc5630105cdc66136e8780ff5cd44605afdeb3e8b933224fa939ef40cd456ca3" + "equalIndicator/v1": "4dda4c1f646c739ca0fdc8b59560186c21e00366f28c3f0b863cb30185a2cbe0" }, "properties": { "ideaSeverity": "ERROR" @@ -16745,16 +17098,16 @@ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/siamese.py", + 
"uri": "minerva/utils/runner.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 15544, + "charLength": 19199, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Siamese models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaSiamese\",\n \"SimCLR\",\n \"SimCLR18\",\n \"SimCLR34\",\n \"SimCLR50\",\n \"SimSiam\",\n \"SimSiam18\",\n \"SimSiam34\",\n \"SimSiam50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom typing import Any, Dict, Sequence, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom .core import MinervaBackbone, MinervaModel, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaSiamese(MinervaBackbone):\n \"\"\"Abstract class for Siamese models.\n\n Attributes:\n backbone (MinervaModel): The backbone encoder for the Siamese model.\n proj_head (~torch.nn.Module): The projection head for re-projecting the outputs\n from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.backbone: MinervaModel\n self.proj_head: Module\n\n def forward(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Can be called directly as a method (e.g. ``model.forward()``) or when\n data is parsed to model (e.g. 
``model()``).\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector ``A``.\n * Output feature vector ``B``.\n * Detached embedding, ``A``, from the :attr:`~MinervaSiamese.backbone`.\n * Detached embedding, ``B``, from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n return self.forward_pair(x)\n\n def forward_pair(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector A.\n * Output feature vector B.\n * Embedding, A, from the backbone.\n * Embedding, B, from the backbone.\n \"\"\"\n g_a, f_a = self.forward_single(x[0])\n g_b, f_b = self.forward_single(x[1])\n\n g = torch.cat([g_a, g_b], dim=0) # type: ignore[attr-defined]\n\n assert isinstance(g, Tensor)\n\n return g, g_a, g_b, f_a, f_b\n\n @abc.abstractmethod\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the backbone\n and feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the projection head\n and the detached embedding vector from the backbone.\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n\nclass SimCLR(MinervaSiamese):\n \"\"\"Base SimCLR class to be subclassed by SimCLR variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimCLR.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimCLR that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from\n the :attr:`~SimCLR.backbone` encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the :attr:`~SimCLR.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimCLR, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n self.proj_head = nn.Sequential(\n nn.Linear(np.prod(backbone_out_shape), 512, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Linear(512, feature_dim, bias=False),\n )\n\n def forward_single(self, x: 
Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the\n :attr:`~SimCLR.backbone` and feeding its output into the :attr:`~SimCLR.proj_head`.\n\n Overwrites :meth:`MinervaSiamese.forward_single`\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the\n :attr:`~SimCLR.proj_head` and the detached embedding vector from the :attr:`~SimCLR.backbone`.\n \"\"\"\n f: Tensor = torch.flatten(self.backbone(x)[0], start_dim=1)\n g: Tensor = self.proj_head(f)\n\n return g, f\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projection's logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n z, z_a, z_b, _, _ = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = self.criterion(z_a, z_b) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, z\n\n\nclass SimCLR18(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet18` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimCLR34(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet32` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimCLR50(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet50` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n\n\nclass SimSiam(MinervaSiamese):\n \"\"\"Base SimSiam class to be subclassed by SimSiam variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimSiam.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimSiam that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from the backbone encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the :attr:`~SimSiam.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n pred_dim: int = 512,\n 
backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimSiam, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n prev_dim = np.prod(backbone_out_shape)\n\n self.proj_head = nn.Sequential( # type: ignore[arg-type]\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # first layer\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # second layer\n nn.Linear(prev_dim, feature_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(feature_dim, affine=False),\n ) # output layer\n # self.proj_head[6].bias.requires_grad = False # hack: not use bias as it is followed by BN\n\n # Build a 2-layer predictor.\n self.predictor = nn.Sequential(\n nn.Linear(feature_dim, pred_dim, bias=False),\n nn.BatchNorm1d(pred_dim),\n nn.ReLU(inplace=True), # hidden layer\n nn.Linear(pred_dim, feature_dim),\n ) # output layer\n\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of :class:`SimSiam` by using the forward methods of the backbone\n and feeding its output into the :attr:`~SimSiam.proj_head`.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from :attr:`~SimSiam.proj_head`\n and the detached embedding vector from the :attr:`~SimSiam.backbone`.\n \"\"\"\n z: Tensor = self.proj_head(torch.flatten(self.backbone(x)[0], start_dim=1)) # type: ignore[attr-defined]\n\n p: Tensor = self.predictor(z)\n\n return p, z.detach()\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. 
If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projection's logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n p, p_a, p_b, z_a, z_b = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = 0.5 * (self.criterion(z_a, p_b) + self.criterion(z_b, p_a)) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, p\n\n\nclass SimSiam18(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet18` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimSiam34(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet34` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimSiam50(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet50` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n" + "text": "# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle generic functionality for running :mod:`minerva` scripts.\n\nAttributes:\n GENERIC_PARSER (~argparse.ArgumentParser): A standard argparser with arguments for use in :mod:`minerva`.\n Can be used as the basis for a user defined extended argparser.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"GENERIC_PARSER\",\n \"WandbConnectionManager\",\n \"setup_wandb_run\",\n \"config_env_vars\",\n \"config_args\",\n \"distributed_run\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nimport signal\nimport subprocess\nfrom argparse import Namespace\nfrom typing import Any, Callable, Optional, Union\n\nimport requests\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom wandb.sdk.lib import RunDisabled\nfrom wandb.sdk.wandb_run import Run\n\nimport wandb\nfrom minerva.utils import CONFIG, MASTER_PARSER, utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# ---+ CLI +--------------------------------------------------------------+\nGENERIC_PARSER = argparse.ArgumentParser(parents=[MASTER_PARSER])\n\nGENERIC_PARSER.add_argument(\n \"-o\",\n \"--override\",\n dest=\"override\",\n action=\"store_true\",\n help=\"Override config arguments with the CLI arguments where they overlap.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--seed\",\n dest=\"seed\",\n type=int,\n default=42,\n help=\"Set seed number.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-name\",\n dest=\"model_name\",\n type=str,\n help=\"Name of model.\"\n + \" Sub-string before hyphen is taken as model class name.\"\n + \" Sub-string past hyphen can be used to differentiate between versions.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-type\",\n dest=\"model_type\",\n type=str,\n help=\"Type of model. 
Should be 'segmentation', 'ssl', 'siamese', 'scene_classifier' or 'mlp'\",\n choices=(\"segmentation\", \"ssl\", \"siamese\", \"scene_classifier\", \"mlp\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--max_epochs\",\n dest=\"max_epochs\",\n type=int,\n default=100,\n help=\"Maximum number of training epochs.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--batch-size\",\n dest=\"batch_size\",\n type=int,\n default=8,\n help=\"Number of samples in each batch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--lr\",\n dest=\"lr\",\n type=float,\n default=0.01,\n help=\"Learning rate of the optimiser.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--optim-func\",\n dest=\"optim_func\",\n type=str,\n default=\"SGD\",\n help=\"Name of the optimiser to use. Only works for ``torch`` losses\"\n + \" (or if ``module`` is specified in the ``optim_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--loss-func\",\n dest=\"loss_func\",\n type=str,\n default=\"CrossEntropyLoss\",\n help=\"Name of the loss function to use. Only works for ``torch`` losses\"\n + \" (or if ``module`` is specified in the ``loss_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--pre-train\",\n dest=\"pre_train\",\n action=\"store_true\",\n help=\"Sets experiment type to pre-train. Will save model to cache at end of training.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--fine-tune\",\n dest=\"fine_tune\",\n action=\"store_true\",\n help=\"Sets experiment type to fine-tune. Will load pre-trained backbone from file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--eval\",\n dest=\"eval\",\n action=\"store_true\",\n help=\"Sets experiment type to eval.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--balance\",\n dest=\"balance\",\n action=\"store_true\",\n help=\"Activates class balancing.\"\n + \" Depending on `model_type`, this will either be via sampling or weighting of the loss function.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--class-elim\",\n dest=\"elim\",\n action=\"store_true\",\n help=\"Eliminates classes that are specified in config but not present in the data.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--sample-pairs\",\n dest=\"sample_pairs\",\n action=\"store_true\",\n help=\"Use paired sampling. E.g. for Siamese models.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-model\",\n dest=\"save_model\",\n type=str,\n default=False,\n help=\"Whether to save the model at end of testing. Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically save the model to file.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not save the model and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--run-tensorboard\",\n dest=\"run_tensorboard\",\n type=str,\n default=False,\n help=\"Whether to run the Tensorboard logs at end of testing. 
Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically locate and run the logs on a local browser.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not run the logs and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-plots-no\",\n dest=\"save\",\n action=\"store_false\",\n help=\"Plots created will not be saved to file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--show-plots\",\n dest=\"show\",\n action=\"store_true\",\n help=\"Show plots created in a window.\"\n + \" Warning: Do not use with a terminal-less operation, e.g. SLURM.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--print-dist\",\n dest=\"p_dist\",\n action=\"store_true\",\n help=\"Print the distribution of classes within the data to `stdout`.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--plot-last-epoch\",\n dest=\"plot_last_epoch\",\n action=\"store_true\",\n help=\"Plot the results from the final validation epoch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log\",\n dest=\"wandb_log\",\n action=\"store_true\",\n help=\"Activate Weights and Biases logging.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--project_name\",\n dest=\"project\",\n type=str,\n help=\"Name of the Weights and Biases project this experiment belongs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-entity\",\n dest=\"entity\",\n type=str,\n help=\"The Weights and Biases entity to send runs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-dir\",\n dest=\"wandb_dir\",\n type=str,\n default=\"./wandb\",\n help=\"Where to store the Weights and Biases logs locally.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log-all\",\n dest=\"log_all\",\n action=\"store_true\",\n help=\"Will log each process on Weights and Biases. Otherwise, logging will be performed from the master process.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--knn-k\",\n dest=\"knn_k\",\n type=int,\n default=200,\n help=\"Top k most similar images used to predict the image for KNN validation.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--val-freq\",\n dest=\"val_freq\",\n type=int,\n default=5,\n help=\"Perform a validation epoch with KNN for every ``val_freq``\"\n + \" training epochs for SSL or Siamese models.\",\n)\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass WandbConnectionManager:\n \"\"\"Checks for a connection to :mod:`wandb`. 
If not, sets :mod:`wandb` to offline during context.\"\"\"\n\n def __init__(self) -> None:\n try:\n requests.head(\"http://www.wandb.ai/\", timeout=0.1)\n self._on = True\n except requests.ConnectionError:\n self._on = False\n\n def __enter__(self) -> None:\n if self._on:\n os.environ[\"WANDB_MODE\"] = \"online\"\n else:\n os.environ[\"WANDB_MODE\"] = \"offline\"\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.environ[\"WANDB_MODE\"] = \"online\"\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef _handle_sigusr1(signum, frame) -> None: # pragma: no cover\n subprocess.Popen( # nosec B602\n f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}',\n shell=True,\n )\n exit()\n\n\ndef _handle_sigterm(signum, frame) -> None: # pragma: no cover\n pass\n\n\ndef setup_wandb_run(gpu: int, args: Namespace) -> Optional[Union[Run, RunDisabled]]:\n \"\"\"Sets up a :mod:`wandb` logger for either every process, the master process or not if not logging.\n\n Note:\n ``args`` must contain these keys:\n\n * ``wandb_log`` (bool): Activate :mod:`wandb` logging.\n * | ``log_all`` (bool): :mod:`wandb` logging on every process if ``True``.\n | Only log on master process if ``False``.\n * ``entity`` (str): :mod:`wandb` entity where to send runs to.\n * ``project`` (str): Name of the :mod:`wandb` project this experiment belongs to.\n * ``world_size`` (int): Total number of processes across the experiment.\n\n Args:\n gpu (int): Local process (GPU) number.\n args (~argparse.Namespace): CLI arguments from :mod:`argparse`.\n\n Returns:\n ~wandb.sdk.wandb_run.Run | ~wandb.sdk.lib.RunDisabled | None: The :mod:`wandb` run object\n for this process or ``None`` if ``log_all=False`` and ``rank!=0``.\n \"\"\"\n run: Optional[Union[Run, RunDisabled]] = None\n if CONFIG.get(\"wandb_log\", False) or CONFIG.get(\"project\", None):\n try:\n if CONFIG.get(\"log_all\", False) and args.world_size > 1:\n run = wandb.init( # pragma: no cover\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n group=CONFIG.get(\"group\", \"DDP\"),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n else:\n if gpu == 0:\n run = wandb.init(\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n CONFIG[\"wandb_log\"] = True\n except wandb.UsageError: # type: ignore[attr-defined] # pragma: no cover\n print(\n \"wandb API Key has not been inited.\",\n \"\\nEither call wandb.login(key=[your_api_key]) or use `wandb login` in the shell.\",\n \"\\nOr if not using wandb, safely ignore this message.\",\n )\n CONFIG[\"wandb_log\"] = False\n except wandb.errors.Error as err: # type: ignore[attr-defined] # pragma: no cover\n print(err)\n CONFIG[\"wandb_log\"] = False\n else:\n print(\"Weights and Biases logging OFF\")\n\n return run\n\n\ndef config_env_vars(args: Namespace) -> Namespace:\n \"\"\"Finds SLURM environment variables (if they exist) and configures args accordingly.\n\n If SLURM variables are found in the environment variables, the arguments are configured for a SLURM job:\n\n * ``args.rank`` is set to the ``SLURM_NODEID * args.ngpus_per_node``.\n * ``args.world_size`` is set to ``SLURM_NNODES * args.ngpus_per_node``.\n * ``args.dist_url`` is set to ``tcp://{host_name}:58472``\n\n If SLURM variables 
are not detected, the arguments are configured for a single-node job:\n\n * ``args.rank=0``.\n * ``args.world_size=args.ngpus_per_node``.\n * ``args.dist_url = \"tcp://localhost:58472\"``.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_size`` attributes.\n \"\"\"\n if \"SLURM_JOB_ID\" in os.environ: # pragma: no cover\n # Single-node and multi-node distributed training on SLURM cluster.\n # Requeue job on SLURM preemption.\n signal.signal(signal.SIGUSR1, _handle_sigusr1) # type: ignore[attr-defined]\n signal.signal(signal.SIGTERM, _handle_sigterm)\n\n # Get SLURM variables.\n slurm_job_nodelist: Optional[str] = os.getenv(\"SLURM_JOB_NODELIST\")\n slurm_nodeid: Optional[str] = os.getenv(\"SLURM_NODEID\")\n slurm_nnodes: Optional[str] = os.getenv(\"SLURM_NNODES\")\n slurm_jobid: Optional[str] = os.getenv(\"SLURM_JOB_ID\")\n\n # Check that SLURM variables have been found.\n assert slurm_job_nodelist is not None\n assert slurm_nodeid is not None\n assert slurm_nnodes is not None\n assert slurm_jobid is not None\n\n # Find a common host name on all nodes.\n # Assume scontrol returns hosts in the same order on all nodes.\n cmd = \"scontrol show hostnames \" + slurm_job_nodelist\n stdout = subprocess.check_output(cmd.split())\n host_name = stdout.decode().splitlines()[0]\n args.rank = int(slurm_nodeid) * args.ngpus_per_node\n args.world_size = int(slurm_nnodes) * args.ngpus_per_node\n args.dist_url = f\"tcp://{host_name}:58472\"\n args.jobid = slurm_jobid\n\n else:\n # Single-node distributed training.\n args.rank = 0\n args.dist_url = \"tcp://localhost:58472\"\n args.world_size = args.ngpus_per_node\n args.jobid = None\n\n return args\n\n\ndef config_args(args: Namespace) -> Namespace:\n \"\"\"Prepare the arguments generated from the :mod:`argparse` CLI for the job run.\n\n * Finds and sets ``args.ngpus_per_node``;\n * updates the ``CONFIG`` with new arguments from the CLI;\n * sets the seeds from the seed found in ``CONFIG`` or from CLI;\n * uses :func:`config_env_vars` to determine the correct arguments for distributed computing jobs e.g. 
SLURM.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_size`` attributes.\n \"\"\"\n args.ngpus_per_node = torch.cuda.device_count()\n\n # Convert CLI arguments to dict.\n args_dict = vars(args)\n\n # Find which CLI arguments are not in the config.\n new_args = {key: args_dict[key] for key in args_dict if key not in CONFIG}\n\n # Updates the config with new arguments from the CLI.\n CONFIG.update(new_args)\n\n # Overrides the arguments from the config with those of the CLI where they overlap.\n # WARNING: This will include the use of the default CLI arguments.\n if args_dict.get(\"override\"): # pragma: no cover\n updated_args = {\n key: args_dict[key]\n for key in args_dict\n if args_dict[key] != CONFIG[key] and args_dict[key] is not None\n }\n CONFIG.update(updated_args)\n\n # Get seed from config.\n seed = CONFIG.get(\"seed\", 42)\n\n # Set torch, numpy and inbuilt seeds for reproducibility.\n utils.set_seeds(seed)\n\n return config_env_vars(args)\n\n\ndef _run_preamble(\n gpu: int, run: Callable[[int, Namespace], Any], args: Namespace\n) -> None: # pragma: no cover\n # Calculates the global rank of this process.\n args.rank += gpu\n\n # Sets up the `wandb` run for this process.\n args.wandb_run = setup_wandb_run(gpu, args)\n\n if args.world_size > 1:\n dist.init_process_group( # type: ignore[attr-defined]\n backend=\"gloo\",\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n print(f\"INITIALISED PROCESS ON {args.rank}\")\n\n if torch.cuda.is_available():\n torch.cuda.set_device(gpu)\n torch.backends.cudnn.benchmark = True # type: ignore\n\n # Start this process run.\n run(gpu, args)\n\n\ndef distributed_run(run: Callable[[int, Namespace], Any], args: Namespace) -> None:\n \"\"\"Runs the supplied function and arguments with distributed computing according to arguments.\n\n :func:`_run_preamble` adds some additional commands to initialise the process group for each run\n and allocate the GPU device number to use before running the supplied function.\n\n Note:\n ``args`` must contain the attributes ``rank``, ``world_size`` and ``dist_url``. 
These can be\n configured using :func:`config_env_vars` or :func:`config_args`.\n\n Args:\n run (~typing.Callable[[int, ~argparse.Namespace], ~typing.Any]): Function to run with distributed computing.\n args (~argparse.Namespace): Arguments for the run and to specify the variables for distributed computing.\n \"\"\"\n if args.world_size <= 1:\n # Setups up the `wandb` run.\n args.wandb_run = setup_wandb_run(0, args)\n\n # Run the experiment.\n run(0, args)\n\n else: # pragma: no cover\n try:\n mp.spawn(_run_preamble, (run, args), args.ngpus_per_node) # type: ignore[attr-defined]\n except KeyboardInterrupt:\n dist.destroy_process_group() # type: ignore[attr-defined]\n" }, "sourceLanguage": "Python" }, @@ -16762,9 +17115,9 @@ "startLine": 1, "startColumn": 1, "charOffset": 0, - "charLength": 15544, + "charLength": 19199, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Siamese models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"MinervaSiamese\",\n \"SimCLR\",\n \"SimCLR18\",\n \"SimCLR34\",\n \"SimCLR50\",\n \"SimSiam\",\n \"SimSiam18\",\n \"SimSiam34\",\n \"SimSiam50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport abc\nfrom typing import Any, Dict, Sequence, Tuple\n\nimport numpy as np\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\nfrom torch.nn.modules import Module\n\nfrom .core import MinervaBackbone, MinervaModel, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass MinervaSiamese(MinervaBackbone):\n \"\"\"Abstract class for Siamese models.\n\n Attributes:\n backbone (MinervaModel): The backbone encoder for the Siamese model.\n proj_head (~torch.nn.Module): The projection head for re-projecting the outputs\n from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n\n def __init__(self, *args, **kwargs) -> None:\n super().__init__(*args, **kwargs)\n\n self.backbone: MinervaModel\n self.proj_head: Module\n\n def forward(self, 
x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Can be called directly as a method (e.g. ``model.forward()``) or when\n data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector ``A``.\n * Output feature vector ``B``.\n * Detached embedding, ``A``, from the :attr:`~MinervaSiamese.backbone`.\n * Detached embedding, ``B``, from the :attr:`~MinervaSiamese.backbone`.\n \"\"\"\n return self.forward_pair(x)\n\n def forward_pair(self, x: Tensor) -> Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]:\n \"\"\"Performs a forward pass of the network by using the forward methods of the backbone and\n feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Pair of batches of input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]: Tuple of:\n * Output feature vectors concatenated together.\n * Output feature vector A.\n * Output feature vector B.\n * Embedding, A, from the backbone.\n * Embedding, B, from the backbone.\n \"\"\"\n g_a, f_a = self.forward_single(x[0])\n g_b, f_b = self.forward_single(x[1])\n\n g = torch.cat([g_a, g_b], dim=0) # type: ignore[attr-defined]\n\n assert isinstance(g, Tensor)\n\n return g, g_a, g_b, f_a, f_b\n\n @abc.abstractmethod\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the backbone\n and feeding its output into the projection heads.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the projection head\n and the detached embedding vector from the backbone.\n \"\"\"\n raise NotImplementedError # pragma: no cover\n\n\nclass SimCLR(MinervaSiamese):\n \"\"\"Base SimCLR class to be subclassed by SimCLR variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimCLR.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimCLR that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from\n the :attr:`~SimCLR.backbone` encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the :attr:`~SimCLR.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimCLR, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = 
self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n self.proj_head = nn.Sequential(\n nn.Linear(np.prod(backbone_out_shape), 512, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(512),\n nn.ReLU(inplace=True),\n nn.Linear(512, feature_dim, bias=False),\n )\n\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of the network by using the forward methods of the\n :attr:`~SimCLR.backbone` and feeding its output into the :attr:`~SimCLR.proj_head`.\n\n Overwrites :meth:`MinervaSiamese.forward_single`\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from the\n :attr:`~SimCLR.proj_head` and the detached embedding vector from the :attr:`~SimCLR.backbone`.\n \"\"\"\n f: Tensor = torch.flatten(self.backbone(x)[0], start_dim=1)\n g: Tensor = self.proj_head(f)\n\n return g, f\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projection's logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n z, z_a, z_b, _, _ = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = self.criterion(z_a, z_b) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, z\n\n\nclass SimCLR18(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet18` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimCLR34(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet32` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimCLR50(SimCLR):\n \"\"\":class:`SimCLR` network using a :class:`~models.resnet.ResNet50` :attr:`~SimCLR.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n\n\nclass SimSiam(MinervaSiamese):\n \"\"\"Base SimSiam class to be subclassed by SimSiam variants.\n\n Subclasses :class:`MinervaSiamese`.\n\n Attributes:\n backbone_name (str): Name of the :attr:`~SimSiam.backbone` within this module to use.\n backbone (~torch.nn.Module): Backbone of SimSiam that takes the imagery input and\n extracts learned representations.\n proj_head (~torch.nn.Module): Projection head that takes the learned representations from the backbone encoder.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int, int, int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n\n backbone_kwargs (dict[str, 
~typing.Any]): Optional; Keyword arguments for the :attr:`~SimSiam.backbone`\n packed up into a dict.\n \"\"\"\n\n __metaclass__ = abc.ABCMeta\n backbone_name = \"ResNet18\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, int, int] = (4, 256, 256),\n feature_dim: int = 128,\n pred_dim: int = 512,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(SimSiam, self).__init__(criterion=criterion, input_size=input_size)\n\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, encoder=True, **backbone_kwargs # type: ignore[arg-type]\n )\n\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n\n prev_dim = np.prod(backbone_out_shape)\n\n self.proj_head = nn.Sequential( # type: ignore[arg-type]\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # first layer\n nn.Linear(prev_dim, prev_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(prev_dim), # type: ignore[arg-type]\n nn.ReLU(inplace=True), # second layer\n nn.Linear(prev_dim, feature_dim, bias=False), # type: ignore[arg-type]\n nn.BatchNorm1d(feature_dim, affine=False),\n ) # output layer\n # self.proj_head[6].bias.requires_grad = False # hack: not use bias as it is followed by BN\n\n # Build a 2-layer predictor.\n self.predictor = nn.Sequential(\n nn.Linear(feature_dim, pred_dim, bias=False),\n nn.BatchNorm1d(pred_dim),\n nn.ReLU(inplace=True), # hidden layer\n nn.Linear(pred_dim, feature_dim),\n ) # output layer\n\n def forward_single(self, x: Tensor) -> Tuple[Tensor, Tensor]:\n \"\"\"Performs a forward pass of a single head of :class:`SimSiam` by using the forward methods of the backbone\n and feeding its output into the :attr:`~SimSiam.proj_head`.\n\n Args:\n x (~torch.Tensor): Batch of unpaired input data to the network.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Tuple of the feature vector outputted from :attr:`~SimSiam.proj_head`\n and the detached embedding vector from the :attr:`~SimSiam.backbone`.\n \"\"\"\n z: Tensor = self.proj_head(torch.flatten(self.backbone(x)[0], start_dim=1)) # type: ignore[attr-defined]\n\n p: Tensor = self.predictor(z)\n\n return p, z.detach()\n\n def step(self, x: Tensor, *args, train: bool = False) -> Tuple[Tensor, Tensor]:\n \"\"\"Overwrites :class:`~models.core.MinervaModel` to account for paired logits.\n\n Raises:\n NotImplementedError: If :attr:`~models.core.MinervaModel.optimiser` is ``None``.\n\n Args:\n x (~torch.Tensor): Batch of input data to network.\n train (bool): Sets whether this shall be a training step or not. ``True`` for training step which will then\n clear the :attr:`~models.core.MinervaModel.optimiser`, and perform a backward pass of the network then\n update the :attr:`~models.core.MinervaModel.optimiser`. 
If ``False`` for a validation or testing step,\n these actions are not taken.\n\n Returns:\n tuple[~torch.Tensor, ~torch.Tensor]: Loss computed by the loss function and a :class:`~torch.Tensor`\n with both projection's logits.\n \"\"\"\n\n if self.optimiser is None:\n raise NotImplementedError(\"Optimiser has not been set!\")\n\n assert self.criterion\n\n # Resets the optimiser's gradients if this is a training step.\n if train:\n self.optimiser.zero_grad()\n\n # Forward pass.\n p, p_a, p_b, z_a, z_b = self.forward(x)\n\n # Compute Loss.\n loss: Tensor = 0.5 * (self.criterion(z_a, p_b) + self.criterion(z_b, p_a)) # type: ignore[arg-type]\n\n # Performs a backward pass if this is a training step.\n if train:\n loss.backward()\n self.optimiser.step()\n\n return loss, p\n\n\nclass SimSiam18(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet18` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet18\"\n\n\nclass SimSiam34(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet34` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet34\"\n\n\nclass SimSiam50(SimSiam):\n \"\"\":class:`SimSiam` network using a :class:`~models.resnet.ResNet50` :attr:`~SimSiam.backbone`.\"\"\"\n\n backbone_name = \"ResNet50\"\n" + "text": "# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# MIT License\n\n# Copyright (c) 2023 Harry Baker\n\n# Permission is hereby granted, free of charge, to any person obtaining a copy\n# of this software and associated documentation files (the \"Software\"), to deal\n# in the Software without restriction, including without limitation the rights\n# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell\n# copies of the Software, and to permit persons to whom the Software is\n# furnished to do so, subject to the following conditions:\n\n# The above copyright notice and this permission notice shall be included in all\n# copies or substantial portions of the Software.\n\n# THE SOFTWARE IS PROVIDED \"AS IS\", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR\n# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,\n# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. 
IN NO EVENT SHALL THE\n# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER\n# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,\n# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE\n# SOFTWARE.\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Module to handle generic functionality for running :mod:`minerva` scripts.\n\nAttributes:\n GENERIC_PARSER (~argparse.ArgumentParser): A standard argparser with arguments for use in :mod:`minerva`.\n Can be used as the basis for a user defined extended argparser.\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n__all__ = [\n \"GENERIC_PARSER\",\n \"WandbConnectionManager\",\n \"setup_wandb_run\",\n \"config_env_vars\",\n \"config_args\",\n \"distributed_run\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nimport signal\nimport subprocess\nfrom argparse import Namespace\nfrom typing import Any, Callable, Optional, Union\n\nimport requests\nimport torch\nimport torch.distributed as dist\nimport torch.multiprocessing as mp\nfrom wandb.sdk.lib import RunDisabled\nfrom wandb.sdk.wandb_run import Run\n\nimport wandb\nfrom minerva.utils import CONFIG, MASTER_PARSER, utils\n\n# =====================================================================================================================\n# GLOBALS\n# =====================================================================================================================\n# ---+ CLI +--------------------------------------------------------------+\nGENERIC_PARSER = argparse.ArgumentParser(parents=[MASTER_PARSER])\n\nGENERIC_PARSER.add_argument(\n \"-o\",\n \"--override\",\n dest=\"override\",\n action=\"store_true\",\n help=\"Override config arguments with the CLI arguments where they overlap.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--seed\",\n dest=\"seed\",\n type=int,\n default=42,\n help=\"Set seed number\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-name\",\n dest=\"model_name\",\n type=str,\n help=\"Name of model.\"\n + \" Sub-string before hyphen is taken as model class name.\"\n + \" Sub-string past hyphen can be used to differeniate between versions.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--model-type\",\n dest=\"model_type\",\n type=str,\n help=\"Type of model. 
Should be 'segmentation', 'scene_classifier', 'siamese' or 'mlp'\",\n choices=(\"segmentation\", \"ssl\", \"siamese\", \"scene_classifier\", \"mlp\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--max_epochs\",\n dest=\"max_epochs\",\n type=int,\n default=100,\n help=\"Maximum number of training epochs.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--batch-size\",\n dest=\"batch_size\",\n type=int,\n default=8,\n help=\"Number of samples in each batch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--lr\",\n dest=\"lr\",\n type=float,\n default=0.01,\n help=\"Learning rate of the optimiser.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--optim-func\",\n dest=\"optim_func\",\n type=str,\n default=\"SGD\",\n help=\"Name of the optimiser to use. Only works for ``torch`` losses\"\n + \"(or if ``module`` is specified in the ``optim_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--loss-func\",\n dest=\"loss_func\",\n type=str,\n default=\"CrossEntropyLoss\",\n help=\"Name of the loss function to use. Only works for ``torch`` losses\"\n + \"(or if ``module`` is specified in the ``loss_params`` in the config)\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--pre-train\",\n dest=\"pre_train\",\n action=\"store_true\",\n help=\"Sets experiment type to pre-train. Will save model to cache at end of training.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--fine-tune\",\n dest=\"fine_tune\",\n action=\"store_true\",\n help=\"Sets experiment type to fine-tune. Will load pre-trained backbone from file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--eval\",\n dest=\"eval\",\n action=\"store_true\",\n help=\"Sets experiment type to pre-train. Will save model to cache at end of training.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--balance\",\n dest=\"balance\",\n action=\"store_true\",\n help=\"Activates class balancing.\"\n + \" Depending on `model_type`, this will either be via sampling or weighting of the loss function.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--class-elim\",\n dest=\"elim\",\n action=\"store_true\",\n help=\"Eliminates classes that are specified in config but not present in the data.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--sample-pairs\",\n dest=\"sample_pairs\",\n action=\"store_true\",\n help=\"Use paired sampling. E.g. For Siamese models.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-model\",\n dest=\"save_model\",\n type=str,\n default=False,\n help=\"Whether to save the model at end of testing. Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically save the model to file.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not save the model and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--run-tensorboard\",\n dest=\"run_tensorboard\",\n type=str,\n default=False,\n help=\"Whether to run the Tensorboard logs at end of testing. 
Must be 'true', 'false' or 'auto'.\"\n + \" Setting 'auto' will automatically locate and run the logs on a local browser.\"\n + \" 'true' will ask the user whether to or not at runtime.\"\n + \" 'false' will not save the model and will not ask the user at runtime.\",\n choices=(\"true\", \"false\", \"auto\"),\n)\n\nGENERIC_PARSER.add_argument(\n \"--save-plots-no\",\n dest=\"save\",\n action=\"store_false\",\n help=\"Plots created will not be saved to file.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--show-plots\",\n dest=\"show\",\n action=\"store_true\",\n help=\"Show plots created in a window.\"\n + \" Warning: Do not use with a terminal-less operation, e.g. SLURM.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--print-dist\",\n dest=\"p_dist\",\n action=\"store_true\",\n help=\"Print the distribution of classes within the data to `stdout`.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--plot-last-epoch\",\n dest=\"plot_last_epoch\",\n action=\"store_true\",\n help=\"Plot the results from the final validation epoch.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log\",\n dest=\"wandb_log\",\n action=\"store_true\",\n help=\"Activate Weights and Biases logging.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--project_name\",\n dest=\"project\",\n type=str,\n help=\"Name of the Weights and Biases project this experiment belongs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-entity\",\n dest=\"entity\",\n type=str,\n help=\"The Weights and Biases entity to send runs to.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-dir\",\n dest=\"wandb_dir\",\n type=str,\n default=\"./wandb\",\n help=\"Where to store the Weights and Biases logs locally.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--wandb-log-all\",\n dest=\"log_all\",\n action=\"store_true\",\n help=\"Will log each process on Weights and Biases. Otherwise, logging will be performed from the master process.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--knn-k\",\n dest=\"knn_k\",\n type=int,\n default=200,\n help=\"Top k most similar images used to predict the image for KNN validation.\",\n)\n\nGENERIC_PARSER.add_argument(\n \"--val-freq\",\n dest=\"val_freq\",\n type=int,\n default=5,\n help=\"Perform a validation epoch with KNN for every ``val_freq``\"\n + \"training epochs for SSL or Siamese models.\",\n)\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass WandbConnectionManager:\n \"\"\"Checks for a connection to :mod:`wandb`. 
If not, sets :mod:`wandb` to offline during context.\"\"\"\n\n def __init__(self) -> None:\n try:\n requests.head(\"http://www.wandb.ai/\", timeout=0.1)\n self._on = True\n except requests.ConnectionError:\n self._on = False\n\n def __enter__(self) -> None:\n if self._on:\n os.environ[\"WANDB_MODE\"] = \"online\"\n else:\n os.environ[\"WANDB_MODE\"] = \"offline\"\n\n def __exit__(self, exc_type, exc_value, exc_traceback) -> None:\n os.environ[\"WANDB_MODE\"] = \"online\"\n\n\n# =====================================================================================================================\n# METHODS\n# =====================================================================================================================\ndef _handle_sigusr1(signum, frame) -> None: # pragma: no cover\n subprocess.Popen( # nosec B602\n f'scontrol requeue {os.getenv(\"SLURM_JOB_ID\")}',\n shell=True,\n )\n exit()\n\n\ndef _handle_sigterm(signum, frame) -> None: # pragma: no cover\n pass\n\n\ndef setup_wandb_run(gpu: int, args: Namespace) -> Optional[Union[Run, RunDisabled]]:\n \"\"\"Sets up a :mod:`wandb` logger for either every process, the master process or not if not logging.\n\n Note:\n ``args`` must contain these keys:\n\n * ``wandb_log`` (bool): Activate :mod:`wandb` logging.\n * | ``log_all`` (bool): :mod:`wandb` logging on every process if ``True``.\n | Only log on master process if ``False``.\n * ``entity`` (str): :mod:`wandb` entity where to send runs to.\n * ``project`` (str): Name of the :mod:`wandb` project this experiment belongs to.\n * ``world_size`` (int): Total number of processes across the experiment.\n\n Args:\n gpu (int): Local process (GPU) number.\n args (~argparse.Namespace): CLI arguments from :mod:`argparse`.\n\n Returns:\n ~wandb.sdk.wandb_run.Run | ~wandb.sdk.lib.RunDisabled | None: The :mod:`wandb` run object\n for this process or ``None`` if ``log_all=False`` and ``rank!=0``.\n \"\"\"\n run: Optional[Union[Run, RunDisabled]] = None\n if CONFIG.get(\"wandb_log\", False) or CONFIG.get(\"project\", None):\n try:\n if CONFIG.get(\"log_all\", False) and args.world_size > 1:\n run = wandb.init( # pragma: no cover\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n group=CONFIG.get(\"group\", \"DDP\"),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n else:\n if gpu == 0:\n run = wandb.init(\n entity=CONFIG.get(\"entity\", None),\n project=CONFIG.get(\"project\", None),\n dir=CONFIG.get(\"wandb_dir\", None),\n name=args.jobid,\n )\n CONFIG[\"wandb_log\"] = True\n except wandb.UsageError: # type: ignore[attr-defined] # pragma: no cover\n print(\n \"wandb API Key has not been inited.\",\n \"\\nEither call wandb.login(key=[your_api_key]) or use `wandb login` in the shell.\",\n \"\\nOr if not using wandb, safely ignore this message.\",\n )\n CONFIG[\"wandb_log\"] = False\n except wandb.errors.Error as err: # type: ignore[attr-defined] # pragma: no cover\n print(err)\n CONFIG[\"wandb_log\"] = False\n else:\n print(\"Weights and Biases logging OFF\")\n\n return run\n\n\ndef config_env_vars(args: Namespace) -> Namespace:\n \"\"\"Finds SLURM environment variables (if they exist) and configures args accordingly.\n\n If SLURM variables are found in the environment variables, the arguments are configured for a SLURM job:\n\n * ``args.rank`` is set to the ``SLURM_NODEID * args.ngpus_per_node``.\n * ``args.world_size`` is set to ``SLURM_NNODES * args.ngpus_per_node``.\n * ``args.dist_url`` is set to ``tcp://{host_name}:58472``\n\n If SLURM variables 
are not detected, the arguments are configured for a single-node job:\n\n * ``args.rank=0``.\n * ``args.world_size=args.ngpus_per_node``.\n * ``args.dist_url = \"tcp://localhost:58472\"``.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_sized`` attributes.\n \"\"\"\n if \"SLURM_JOB_ID\" in os.environ: # pragma: no cover\n # Single-node and multi-node distributed training on SLURM cluster.\n # Requeue job on SLURM preemption.\n signal.signal(signal.SIGUSR1, _handle_sigusr1) # type: ignore[attr-defined]\n signal.signal(signal.SIGTERM, _handle_sigterm)\n\n # Get SLURM variables.\n slurm_job_nodelist: Optional[str] = os.getenv(\"SLURM_JOB_NODELIST\")\n slurm_nodeid: Optional[str] = os.getenv(\"SLURM_NODEID\")\n slurm_nnodes: Optional[str] = os.getenv(\"SLURM_NNODES\")\n slurm_jobid: Optional[str] = os.getenv(\"SLURM_JOB_ID\")\n\n # Check that SLURM variables have been found.\n assert slurm_job_nodelist is not None\n assert slurm_nodeid is not None\n assert slurm_nnodes is not None\n assert slurm_jobid is not None\n\n # Find a common host name on all nodes.\n # Assume scontrol returns hosts in the same order on all nodes.\n cmd = \"scontrol show hostnames \" + slurm_job_nodelist\n stdout = subprocess.check_output(cmd.split())\n host_name = stdout.decode().splitlines()[0]\n args.rank = int(slurm_nodeid) * args.ngpus_per_node\n args.world_size = int(slurm_nnodes) * args.ngpus_per_node\n args.dist_url = f\"tcp://{host_name}:58472\"\n args.jobid = slurm_jobid\n\n else:\n # Single-node distributed training.\n args.rank = 0\n args.dist_url = \"tcp://localhost:58472\"\n args.world_size = args.ngpus_per_node\n args.jobid = None\n\n return args\n\n\ndef config_args(args: Namespace) -> Namespace:\n \"\"\"Prepare the arguments generated from the :mod:`argparse` CLI for the job run.\n\n * Finds and sets ``args.ngpus_per_node``;\n * updates the ``CONFIG`` with new arguments from the CLI;\n * sets the seeds from the seed found in ``CONFIG`` or from CLI;\n * uses :func:`config_env_vars` to determine the correct arguments for distributed computing jobs e.g. 
SLURM.\n\n Args:\n args (~argparse.Namespace): Arguments from the CLI ``parser`` from :mod:`argparse`.\n\n Returns:\n ~argparse.Namespace: Inputted arguments with the addition of ``rank``, ``dist_url``\n and ``world_sized`` attributes.\n \"\"\"\n args.ngpus_per_node = torch.cuda.device_count()\n\n # Convert CLI arguments to dict.\n args_dict = vars(args)\n\n # Find which CLI arguments are not in the config.\n new_args = {key: args_dict[key] for key in args_dict if key not in CONFIG}\n\n # Updates the config with new arguments from the CLI.\n CONFIG.update(new_args)\n\n # Overrides the arguments from the config with those of the CLI where they overlap.\n # WARNING: This will include the use of the default CLI arguments.\n if args_dict.get(\"override\"): # pragma: no cover\n updated_args = {\n key: args_dict[key]\n for key in args_dict\n if args_dict[key] != CONFIG[key] and args_dict[key] is not None\n }\n CONFIG.update(updated_args)\n\n # Get seed from config.\n seed = CONFIG.get(\"seed\", 42)\n\n # Set torch, numpy and inbuilt seeds for reproducibility.\n utils.set_seeds(seed)\n\n return config_env_vars(args)\n\n\ndef _run_preamble(\n gpu: int, run: Callable[[int, Namespace], Any], args: Namespace\n) -> None: # pragma: no cover\n # Calculates the global rank of this process.\n args.rank += gpu\n\n # Setups the `wandb` run for this process.\n args.wandb_run = setup_wandb_run(gpu, args)\n\n if args.world_size > 1:\n dist.init_process_group( # type: ignore[attr-defined]\n backend=\"gloo\",\n init_method=args.dist_url,\n world_size=args.world_size,\n rank=args.rank,\n )\n print(f\"INITIALISED PROCESS ON {args.rank}\")\n\n if torch.cuda.is_available():\n torch.cuda.set_device(gpu)\n torch.backends.cudnn.benchmark = True # type: ignore\n\n # Start this process run.\n run(gpu, args)\n\n\ndef distributed_run(run: Callable[[int, Namespace], Any], args: Namespace) -> None:\n \"\"\"Runs the supplied function and arguments with distributed computing according to arguments.\n\n :func:`_run_preamble` adds some additional commands to initialise the process group for each run\n and allocating the GPU device number to use before running the supplied function.\n\n Note:\n ``args`` must contain the attributes ``rank``, ``world_size`` and ``dist_url``. 
These can be\n configured using :func:`config_env_vars` or :func:`config_args`.\n\n Args:\n run (~typing.Callable[[int, ~argparse.Namespace], ~typing.Any]): Function to run with distributed computing.\n args (~argparse.Namespace): Arguments for the run and to specify the variables for distributed computing.\n \"\"\"\n if args.world_size <= 1:\n # Setups up the `wandb` run.\n args.wandb_run = setup_wandb_run(0, args)\n\n # Run the experiment.\n run(0, args)\n\n else: # pragma: no cover\n try:\n mp.spawn(_run_preamble, (run, args), args.ngpus_per_node) # type: ignore[attr-defined]\n except KeyboardInterrupt:\n dist.destroy_process_group() # type: ignore[attr-defined]\n" } } }, @@ -16777,44 +17130,44 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "2002621e591342c95a68ca9d455aba9dff486017b662051549509788425f753e" + "equalIndicator/v1": "a554f897d3fab78a93bf3b641463d1964121266fe30b799093f3786010412c50" }, "properties": { "ideaSeverity": "ERROR" } }, { - "ruleId": "PyInterpreterInspection", + "ruleId": "PyUnresolvedReferencesInspection", "kind": "fail", "level": "error", "message": { - "text": "No Python interpreter configured for the project", - "markdown": "No Python interpreter configured for the project" + "text": "No module named 'os'", + "markdown": "No module named 'os'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/MinervaExp.py", + "uri": "docs/conf.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1, - "startColumn": 1, - "charOffset": 0, - "charLength": 3546, + "startLine": 14, + "startColumn": 8, + "charOffset": 584, + "charLength": 2, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to execute the creation, fitting and testing of a computer vision neural network model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n\n# TODO: Add ability to conduct hyper-parameter iterative variation experimentation.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nimport argcomplete\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu,\n rank=args.rank,\n world_size=args.world_size,\n wandb_run=args.wandb_run,\n **CONFIG,\n )\n\n if not CONFIG.get(\"eval\", False):\n trainer.fit()\n\n if CONFIG.get(\"pre_train\", False) and gpu == 0:\n trainer.save_backbone()\n trainer.close()\n\n if not CONFIG.get(\"pre_train\", False):\n trainer.test()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n argcomplete.autocomplete(parser)\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Print Minerva banner.\n utils._print_banner()\n\n with runner.WandbConnectionManager():\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + "text": "os" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 12, "startColumn": 1, - "charOffset": 0, - "charLength": 3546, + "charOffset": 495, + "charLength": 103, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# PYTHON_ARGCOMPLETE_OK\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to execute the creation, fitting and testing of a computer vision neural network model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n\n# TODO: Add ability to conduct hyper-parameter iterative variation experimentation.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nimport argcomplete\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu,\n rank=args.rank,\n world_size=args.world_size,\n wandb_run=args.wandb_run,\n **CONFIG,\n )\n\n if not CONFIG.get(\"eval\", False):\n trainer.fit()\n\n if CONFIG.get(\"pre_train\", False) and gpu == 0:\n trainer.save_backbone()\n trainer.close()\n\n if not CONFIG.get(\"pre_train\", False):\n trainer.test()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n argcomplete.autocomplete(parser)\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Print Minerva banner.\n utils._print_banner()\n\n with runner.WandbConnectionManager():\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + "text": "# documentation root, use os.path.abspath to make it absolute, like shown here.\n#\nimport os\nimport sys\n" } } }, @@ -16827,44 +17180,44 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "a7e7273e1f651dd7466ec371f423f30a9a9c3d0140801a9d5b2e0f3dc027597e" + "equalIndicator/v1": "211d099f33fd667001e148bc98c60f21c83cb151d450deb51fe907fbdfd84c3a" }, "properties": { "ideaSeverity": "ERROR" } }, { - "ruleId": "PyInterpreterInspection", + "ruleId": "PyUnresolvedReferencesInspection", "kind": "fail", "level": "error", "message": { - "text": "No Python interpreter configured for the project", - "markdown": "No Python interpreter configured for the project" + "text": "No module named 'sys'", + "markdown": "No module named 'sys'" }, "locations": [ { 
"physicalLocation": { "artifactLocation": { - "uri": "scripts/ManifestMake.py", + "uri": "docs/conf.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1, - "startColumn": 1, - "charOffset": 0, - "charLength": 2495, + "startLine": 15, + "startColumn": 8, + "charOffset": 594, + "charLength": 3, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to create manifests of data for use in Minerva pre-processing to reduce computation time.\"\"\"\n# TODO: Re-engineer for use with torchvision style datasets.\n# TODO: Consider use of parquet format rather than csv.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom minerva.datasets import make_manifest\nfrom minerva.utils import CONFIG, universal_path, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main():\n manifest = make_manifest(CONFIG)\n\n print(manifest)\n\n output_dir = universal_path(CONFIG[\"dir\"][\"cache\"])\n\n fn = output_dir / f\"{utils.get_dataset_name()}_Manifest.csv\"\n\n print(f\"MANIFEST TO FILE -----> {fn}\")\n manifest.to_csv(fn)\n\n\nif __name__ == \"__main__\":\n main()\n" + "text": "sys" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 13, "startColumn": 1, - "charOffset": 0, - "charLength": 2495, + "charOffset": 575, + "charLength": 74, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Script to create manifests of data for use in Minerva pre-processing to reduce computation time.\"\"\"\n# TODO: Re-engineer for use with torchvision style datasets.\n# TODO: Consider use of parquet format rather than csv.\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom minerva.datasets import make_manifest\nfrom minerva.utils import CONFIG, universal_path, utils\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main():\n manifest = make_manifest(CONFIG)\n\n print(manifest)\n\n output_dir = universal_path(CONFIG[\"dir\"][\"cache\"])\n\n fn = output_dir / f\"{utils.get_dataset_name()}_Manifest.csv\"\n\n print(f\"MANIFEST TO FILE -----> {fn}\")\n manifest.to_csv(fn)\n\n\nif __name__ == \"__main__\":\n main()\n" + "text": "#\nimport os\nimport sys\n\nsys.path.insert(0, os.path.abspath(\"../minerva/\"))" } } }, @@ -16877,44 +17230,44 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "53d480f49fbfd42f01b2c5a1180231f5ddbb2ee55ab69d0be794f7612491028b" + "equalIndicator/v1": "cf12309451abbbfbdba801dfcc2bc6d567d45ca1163301a88a1e59ddcbcd4361" }, "properties": { "ideaSeverity": "ERROR" } }, { - "ruleId": "PyInterpreterInspection", + "ruleId": "PyUnresolvedReferencesInspection", "kind": "fail", "level": "error", "message": { - "text": "No Python interpreter configured for the project", - "markdown": "No Python interpreter configured for the project" + "text": "No module named 'argparse'", + "markdown": "No module named 'argparse'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/MinervaClusterVis.py", + "uri": "minerva/utils/__init__.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1, - "startColumn": 1, - "charOffset": 0, - "charLength": 2996, + "startLine": 54, + "startColumn": 8, + "charOffset": 2588, + "charLength": 8, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Adaptation of ``MinervaExp.py`` for cluster visualisation of a model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(gpu=gpu, rank=args.rank, world_size=args.world_size, **CONFIG)\n\n trainer.tsne_cluster()\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + "text": "argparse" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 52, "startColumn": 1, - "charOffset": 0, - "charLength": 2996, + "charOffset": 2399, + "charLength": 232, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. 
If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Adaptation of ``MinervaExp.py`` for cluster visualisation of a model.\n\nDesigned for use in SLURM clusters and with distributed computing support.\n\nSome code derived from Barlow Twins implementation of distributed computing:\nhttps://github.com/facebookresearch/barlowtwins\n\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(gpu=gpu, rank=args.rank, world_size=args.world_size, **CONFIG)\n\n trainer.tsne_cluster()\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + "text": "# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport os\nfrom pathlib import Path" } } }, @@ -16927,44 +17280,44 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "9db4c0574404cf485f77f83d8dd863cb75fb99638d421a6dcefbee479875cd5e" + "equalIndicator/v1": "1af1c4d628f0edc4a543c8544c44ffaf8f7d0c69b46eb6516040354bae9d8b46" }, "properties": { "ideaSeverity": "ERROR" } }, { - "ruleId": "PyInterpreterInspection", + "ruleId": "PyUnresolvedReferencesInspection", "kind": "fail", "level": "error", "message": { - "text": "No Python interpreter configured for the project", - "markdown": "No Python interpreter configured for the project" + "text": "No module named 'os'", + "markdown": "No module named 'os'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "scripts/Torch_to_ONNX.py", + "uri": "minerva/utils/__init__.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1, - "startColumn": 1, - "charOffset": 0, - "charLength": 2993, + "startLine": 55, + "startColumn": 8, + "charOffset": 2604, + "charLength": 2, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as 
published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Converts :mod:`torch` model weights to ``ONNX`` format.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, universal_path\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu, rank=args.rank, world_size=args.world_size, verbose=False, **CONFIG\n )\n\n weights_path = universal_path(CONFIG[\"dir\"][\"cache\"]) / CONFIG[\"pre_train_name\"]\n\n trainer.save_model(fn=weights_path, format=\"onnx\")\n\n print(f\"Model saved to --> {weights_path}.onnx\")\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + "text": "os" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 53, "startColumn": 1, - "charOffset": 0, - "charLength": 2993, + "charOffset": 2461, + "charLength": 198, "snippet": { - "text": "#!/usr/bin/env python\n# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. 
See the\n# GNU General Public License for more details.\n#\n# You should have received a copy of the GNU General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n\"\"\"Converts :mod:`torch` model weights to ``ONNX`` format.\"\"\"\n\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU GPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer\nfrom minerva.utils import CONFIG, runner, universal_path\n\n\n# =====================================================================================================================\n# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu, rank=args.rank, world_size=args.world_size, verbose=False, **CONFIG\n )\n\n weights_path = universal_path(CONFIG[\"dir\"][\"cache\"]) / CONFIG[\"pre_train_name\"]\n\n trainer.save_model(fn=weights_path, format=\"onnx\")\n\n print(f\"Model saved to --> {weights_path}.onnx\")\n\n if gpu == 0:\n trainer.close()\n\n\nif __name__ == \"__main__\":\n # ---+ CLI +--------------------------------------------------------------+\n parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)\n\n # ------------ ADD EXTRA ARGS FOR THE PARSER HERE ------------------------+\n\n # Export args from CLI.\n cli_args = parser.parse_args()\n\n # Configure the arguments and environment variables.\n runner.config_args(cli_args)\n\n # Run the specified main with distributed computing and the arguments provided.\n runner.distributed_run(main, cli_args)\n" + "text": "# =====================================================================================================================\nimport argparse\nimport os\nfrom pathlib import Path\nfrom typing import Optional" } } }, @@ -16977,44 +17330,44 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "370213d4db59d464cd40580d034cc01b807daf5ec852820bfb55e22187323e63" + "equalIndicator/v1": "dcec182fb643a05fd343a2b8d9e601c3a182b7db99351d1d4fa56a2c6177b1c5" }, "properties": { "ideaSeverity": "ERROR" } }, { - "ruleId": "PyInterpreterInspection", + "ruleId": "PyUnresolvedReferencesInspection", "kind": "fail", "level": "error", "message": { - "text": "No Python interpreter configured for the project", - "markdown": "No Python interpreter configured for the project" + "text": "Unresolved reference 'pathlib'", + "markdown": "Unresolved reference 'pathlib'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "notebooks/Visualise_Siamese_Pair.ipynb", + "uri": "minerva/utils/__init__.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1, - "startColumn": 1, - "charOffset": 0, - "charLength": 5172, + "startLine": 56, + "startColumn": 6, + "charOffset": 2612, + "charLength": 7, "snippet": { - "text": 
"{\n \"cells\": [\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"Copyright (C) 2023 Harry Baker\\n\",\n \"\\n\",\n \"This program is free software: you can redistribute it and/or modify\\n\",\n \"it under the terms of the GNU General Public License as published by\\n\",\n \"the Free Software Foundation, either version 3 of the License, or\\n\",\n \"(at your option) any later version.\\n\",\n \"\\n\",\n \"This program is distributed in the hope that it will be useful,\\n\",\n \"but WITHOUT ANY WARRANTY; without even the implied warranty of\\n\",\n \"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n\",\n \"GNU General Public License for more details.\\n\",\n \"\\n\",\n \"You should have received a copy of the GNU General Public License\\n\",\n \"along with this program in LICENSE.txt. If not,\\n\",\n \"see .\\n\",\n \"\\n\",\n \"@org: University of Southampton\\n\",\n \"Created under a project funded by the Ordnance Survey Ltd.\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"Author: Harry Baker\\n\",\n \"\\n\",\n \"Contact: hjb1d20@soton.ac.uk\\n\",\n \"\\n\",\n \"License: GNU GPLv3\\n\",\n \"\\n\",\n \"Copyright: Copyright (C) 2023 Harry Baker\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Visualise Siamese Pairs\\n\",\n \"\\n\",\n \"This is a short notebook to visualise the two patches from a pair used in Siamese learning to check the behaviour of the transforms. \"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Import\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"import os\\n\",\n \"import matplotlib.pyplot as plt\\n\",\n \"\\n\",\n \"from minerva.trainer import Trainer\\n\",\n \"from minerva.utils import config_load\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Load config\\n\",\n \"\\n\",\n \"For this demonstration, we'll be using the inbuilt `example_GeoCLR_config.yml` config. For this to be successfully loaded in this notebook, we need to change to the `inbuilt_cfgs` directory before loading the config. We then change up to the repositry root level as this config is namely designed to work with pytest running from the root.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"os.chdir(\\\"../inbuilt_cfgs/\\\")\\n\",\n \"CONFIG, _ = config_load.load_configs(\\\"example_GeoCLR_config.yml\\\")\\n\",\n \"os.chdir(\\\"..\\\")\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Init Trainer\\n\",\n \"\\n\",\n \"Initialises a trainer object using the config we just loaded. 
`Trainer` is the main entry point to `minerva` so it is by far the easiest way of obtaining a pair of samples from the dataset.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"trainer = Trainer(gpu=0, verbose=False, **CONFIG)\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Get Batch of Data\\n\",\n \"\\n\",\n \"Now we can use the train dataset loader constructed with `trainer` to get a batch of the data out.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"batch = next(iter(trainer.loaders[\\\"train\\\"]))\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Plotting Images\\n\",\n \"\\n\",\n \"Then it is just a simple task of extracting the images from each side of the pair within the batch of data and and plotting just the RGB channels.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"image1 = batch[0][\\\"image\\\"][0][0:3, :, :].permute(1, 2, 0)\\n\",\n \"plt.imshow(image1)\\n\",\n \"plt.axis(\\\"off\\\")\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"image2 = batch[1][\\\"image\\\"][0][0:3, :, :].permute(1, 2, 0)\\n\",\n \"plt.imshow(image2)\\n\",\n \"plt.axis(\\\"off\\\")\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"Hopefully these results will show two different patches and that the transforms are indeed working.\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"minerva-310\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.10.9\"\n },\n \"orig_nbformat\": 4,\n \"vscode\": {\n \"interpreter\": {\n \"hash\": \"3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff\"\n }\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n" + "text": "pathlib" }, - "sourceLanguage": "JupyterPython" + "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 54, "startColumn": 1, - "charOffset": 0, - "charLength": 5172, + "charOffset": 2581, + "charLength": 79, "snippet": { - "text": "{\n \"cells\": [\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"Copyright (C) 2023 Harry Baker\\n\",\n \"\\n\",\n \"This program is free software: you can redistribute it and/or modify\\n\",\n \"it under the terms of the GNU General Public License as published by\\n\",\n \"the Free Software Foundation, either version 3 of the License, or\\n\",\n \"(at your option) any later version.\\n\",\n \"\\n\",\n \"This program is distributed in the hope that it will be useful,\\n\",\n \"but WITHOUT ANY WARRANTY; without even the implied warranty of\\n\",\n \"MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\\n\",\n \"GNU General Public License for more details.\\n\",\n \"\\n\",\n \"You should have received a copy of the GNU General Public License\\n\",\n \"along with this program in LICENSE.txt. 
If not,\\n\",\n \"see .\\n\",\n \"\\n\",\n \"@org: University of Southampton\\n\",\n \"Created under a project funded by the Ordnance Survey Ltd.\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"Author: Harry Baker\\n\",\n \"\\n\",\n \"Contact: hjb1d20@soton.ac.uk\\n\",\n \"\\n\",\n \"License: GNU GPLv3\\n\",\n \"\\n\",\n \"Copyright: Copyright (C) 2023 Harry Baker\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"# Visualise Siamese Pairs\\n\",\n \"\\n\",\n \"This is a short notebook to visualise the two patches from a pair used in Siamese learning to check the behaviour of the transforms. \"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Import\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"import os\\n\",\n \"import matplotlib.pyplot as plt\\n\",\n \"\\n\",\n \"from minerva.trainer import Trainer\\n\",\n \"from minerva.utils import config_load\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Load config\\n\",\n \"\\n\",\n \"For this demonstration, we'll be using the inbuilt `example_GeoCLR_config.yml` config. For this to be successfully loaded in this notebook, we need to change to the `inbuilt_cfgs` directory before loading the config. We then change up to the repositry root level as this config is namely designed to work with pytest running from the root.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"os.chdir(\\\"../inbuilt_cfgs/\\\")\\n\",\n \"CONFIG, _ = config_load.load_configs(\\\"example_GeoCLR_config.yml\\\")\\n\",\n \"os.chdir(\\\"..\\\")\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Init Trainer\\n\",\n \"\\n\",\n \"Initialises a trainer object using the config we just loaded. 
`Trainer` is the main entry point to `minerva` so it is by far the easiest way of obtaining a pair of samples from the dataset.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"trainer = Trainer(gpu=0, verbose=False, **CONFIG)\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Get Batch of Data\\n\",\n \"\\n\",\n \"Now we can use the train dataset loader constructed with `trainer` to get a batch of the data out.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"batch = next(iter(trainer.loaders[\\\"train\\\"]))\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"### Plotting Images\\n\",\n \"\\n\",\n \"Then it is just a simple task of extracting the images from each side of the pair within the batch of data and and plotting just the RGB channels.\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"image1 = batch[0][\\\"image\\\"][0][0:3, :, :].permute(1, 2, 0)\\n\",\n \"plt.imshow(image1)\\n\",\n \"plt.axis(\\\"off\\\")\"\n ]\n },\n {\n \"cell_type\": \"code\",\n \"execution_count\": null,\n \"metadata\": {},\n \"outputs\": [],\n \"source\": [\n \"image2 = batch[1][\\\"image\\\"][0][0:3, :, :].permute(1, 2, 0)\\n\",\n \"plt.imshow(image2)\\n\",\n \"plt.axis(\\\"off\\\")\"\n ]\n },\n {\n \"attachments\": {},\n \"cell_type\": \"markdown\",\n \"metadata\": {},\n \"source\": [\n \"Hopefully these results will show two different patches and that the transforms are indeed working.\"\n ]\n }\n ],\n \"metadata\": {\n \"kernelspec\": {\n \"display_name\": \"minerva-310\",\n \"language\": \"python\",\n \"name\": \"python3\"\n },\n \"language_info\": {\n \"codemirror_mode\": {\n \"name\": \"ipython\",\n \"version\": 3\n },\n \"file_extension\": \".py\",\n \"mimetype\": \"text/x-python\",\n \"name\": \"python\",\n \"nbconvert_exporter\": \"python\",\n \"pygments_lexer\": \"ipython3\",\n \"version\": \"3.10.9\"\n },\n \"orig_nbformat\": 4,\n \"vscode\": {\n \"interpreter\": {\n \"hash\": \"3564bae54b830248e5fcf548a4e349b732e585ece6f047dc1ae97c29756580ff\"\n }\n }\n },\n \"nbformat\": 4,\n \"nbformat_minor\": 2\n}\n" + "text": "import argparse\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n" } } }, @@ -17027,44 +17380,94 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "bd7242ff0b9797260e483f27ff785c1215fcbe6c93d98caecefe80b79a427bdf" + "equalIndicator/v1": "6f27443007170057874af77ee6ebb5d353c3caac469663cb9ea59eb1b1222ee9" }, "properties": { "ideaSeverity": "ERROR" } }, { - "ruleId": "PyInterpreterInspection", + "ruleId": "PyUnresolvedReferencesInspection", "kind": "fail", "level": "error", "message": { - "text": "No Python interpreter configured for the project", - "markdown": "No Python interpreter configured for the project" + "text": "No module named 'argparse'", + "markdown": "No module named 'argparse'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/models/fcn.py", + "uri": "scripts/Torch_to_ONNX.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 1, + "startLine": 35, + "startColumn": 8, + "charOffset": 1643, + "charLength": 8, + "snippet": { + "text": "argparse" + }, + "sourceLanguage": "Python" + }, + "contextRegion": { + "startLine": 33, "startColumn": 1, - "charOffset": 0, - 
"charLength": 16926, + "charOffset": 1454, + "charLength": 234, + "snippet": { + "text": "# IMPORTS\n# =====================================================================================================================\nimport argparse\n\nfrom minerva.trainer import Trainer" + } + } + }, + "logicalLocations": [ + { + "fullyQualifiedName": "project", + "kind": "module" + } + ] + } + ], + "partialFingerprints": { + "equalIndicator/v1": "23c5351fa9104415211620a446116daf48d8b26c2a74a2d3cb2a5762dba8827b" + }, + "properties": { + "ideaSeverity": "ERROR" + } + }, + { + "ruleId": "PyUnresolvedReferencesInspection", + "kind": "fail", + "level": "error", + "message": { + "text": "Unresolved reference 'Path'", + "markdown": "Unresolved reference 'Path'" + }, + "locations": [ + { + "physicalLocation": { + "artifactLocation": { + "uri": "minerva/utils/__init__.py", + "uriBaseId": "SRCROOT" + }, + "region": { + "startLine": 56, + "startColumn": 21, + "charOffset": 2627, + "charLength": 4, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Fully Convolutional Network (FCN) models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n__all__ = [\n \"FCN\",\n \"DCN\",\n \"FCN8ResNet18\",\n \"FCN8ResNet34\",\n \"FCN8ResNet50\",\n \"FCN8ResNet101\",\n \"FCN8ResNet152\",\n \"FCN16ResNet18\",\n \"FCN16ResNet34\",\n \"FCN16ResNet50\",\n \"FCN32ResNet18\",\n \"FCN32ResNet34\",\n \"FCN32ResNet50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Dict, Literal, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\n\nfrom .core import MinervaBackbone, MinervaModel, bilinear_init, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass FCN(MinervaBackbone):\n \"\"\"Base Fully Convolutional Network (FCN) class to be subclassed by FCN variants described in the FCN paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n 
Subclasses :class:`~models.MinervaModel`.\n\n Attributes:\n backbone_name (str): Optional; Name of the backbone within this module to use for the FCN.\n decoder_variant (str): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. See the FCN paper for details on these variants.\n backbone (~torch.nn.Module): Backbone of the FCN that takes the imagery input and\n extracts learned representations.\n decoder (~torch.nn.Module): Decoder that takes the learned representations from the backbone encoder\n and de-convolves to output a classification segmentation mask.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in data to be classified.\n batch_size (int): Optional; Number of samples in each batch supplied to the network.\n Only needed for Decoder, not DCN.\n backbone_weight_path (str): Optional; Path to pre-trained weights for the backbone to be loaded.\n freeze_backbone (bool): Freezes the weights on the backbone to prevent end-to-end training\n if using a pre-trained backbone.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the backbone packed up into a dict.\n \"\"\"\n\n backbone_name: str = \"ResNet18\"\n decoder_variant: Literal[\"32\", \"16\", \"8\"] = \"32\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, ...] = (4, 256, 256),\n n_classes: int = 8,\n backbone_weight_path: Optional[str] = None,\n freeze_backbone: bool = False,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(FCN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n # Initialises the selected Minerva backbone.\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, n_classes=n_classes, encoder=True, **backbone_kwargs # type: ignore\n )\n\n # Loads and graphts the pre-trained weights ontop of the backbone if the path is provided.\n if backbone_weight_path is not None: # pragma: no cover\n self.backbone.load_state_dict(torch.load(backbone_weight_path))\n\n # Freezes the weights of backbone to avoid end-to-end training.\n if freeze_backbone:\n self.backbone.requires_grad_(False)\n\n # Determines the output shape of the backbone so the correct input shape is known\n # for the proceeding layers of the network.\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n self.decoder = DCN(\n in_channel=backbone_out_shape[0],\n n_classes=n_classes,\n variant=self.decoder_variant,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the FCN by using the forward methods of the backbone and\n feeding its output into the forward for the decoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n \"\"\"\n z = self.backbone(x)\n z = self.decoder(z)\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass DCN(MinervaModel):\n \"\"\"Generic DCN defined by the FCN paper. 
Can construct the DCN32, DCN16 or DCN8 variants defined in the paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Attributes:\n variant (~typing.Literal['32', '16', '8']): Defines which DCN variant this object is, altering the\n layers constructed and the computational graph. Will be either ``'32'``, ``'16'`` or ``'8'``.\n See the FCN paper for details on these variants.\n n_classes (int): Number of classes in dataset. Defines number of output classification channels.\n relu (~torch.nn.ReLU): Rectified Linear Unit (ReLU) activation layer to be used throughout the network.\n Conv1x1 (~torch.nn.Conv2d): First Conv1x1 layer acting as input to the network from the final output of\n the encoder and common to all variants.\n bn1 (~torch.nn.BatchNorm2d): First batch norm layer common to all variants that comes after Conv1x1.\n DC32 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 32 for DCN32 variant.\n dbn32 (~torch.nn.BatchNorm2d): Batch norm layer after DC32.\n Conv1x1_x3 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n third layer from the ResNet encoder.\n DC2 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN16 & DCN8 variants.\n dbn2 (~torch.nn.BatchNorm2d): Batch norm layer after DC2.\n DC16 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 16 for DCN16 variant.\n dbn16 (~torch.nn.BatchNorm2d): Batch norm layer after DC16.\n Conv1x1_x2 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n second layer from the ResNet encoder.\n DC4 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN8 variant.\n dbn4 (~torch.nn.BatchNorm2d): Batch norm layer after DC4.\n DC8 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 8 for DCN8 variant.\n dbn8 (~torch.nn.BatchNorm2d): Batch norm layer after DC8.\n\n Args:\n in_channel (int): Optional; Number of channels in the input layer of the network.\n Should match the number of output channels (likely feature maps) from the encoder.\n n_classes (int): Optional; Number of classes in dataset. Defines number of output classification channels.\n variant (~typing.Literal['32', '16', '8']): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. 
See the FCN paper for details on these variants.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n\n def __init__(\n self,\n in_channel: int = 512,\n n_classes: int = 21,\n variant: Literal[\"32\", \"16\", \"8\"] = \"32\",\n ) -> None:\n super(DCN, self).__init__(n_classes=n_classes)\n self.variant: Literal[\"32\", \"16\", \"8\"] = variant\n\n assert type(self.n_classes) is int\n\n # Common to all variants.\n self.relu = nn.ReLU(inplace=True)\n self.Conv1x1 = nn.Conv2d(in_channel, self.n_classes, kernel_size=(1, 1))\n self.bn1 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"32\":\n self.DC32 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(64, 64),\n stride=(32, 32),\n dilation=1,\n padding=(16, 16),\n )\n self.DC32.weight.data = bilinear_init(self.n_classes, self.n_classes, 64)\n self.dbn32 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant in (\"16\", \"8\"):\n self.Conv1x1_x3 = nn.Conv2d(\n int(in_channel / 2), self.n_classes, kernel_size=(1, 1)\n )\n self.DC2 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC2.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn2 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"16\":\n self.DC16 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(32, 32),\n stride=(16, 16),\n dilation=1,\n padding=(8, 8),\n )\n self.DC16.weight.data = bilinear_init(self.n_classes, self.n_classes, 32)\n self.dbn16 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"8\":\n self.Conv1x1_x2 = nn.Conv2d(\n int(in_channel / 4), self.n_classes, kernel_size=(1, 1)\n )\n\n self.DC4 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC4.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn4 = nn.BatchNorm2d(self.n_classes)\n\n self.DC8 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(16, 16),\n stride=(8, 8),\n dilation=1,\n padding=(4, 4),\n )\n self.DC8.weight.data = bilinear_init(self.n_classes, self.n_classes, 16)\n self.dbn8 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n def forward(self, x: Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]) -> Tensor:\n \"\"\"Performs a forward pass of the decoder. Depending on DCN variant, will take multiple inputs\n throughout pass from the encoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. 
``model()``).\n\n Args:\n x (tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]): Input data to network.\n Should be from a backbone that supports output at multiple points e.g ResNet.\n\n Returns:\n ~torch.Tensor: Segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n # Unpack outputs from the ResNet layers.\n x4, x3, x2, *_ = x\n\n # All DCNs have a common 1x1 Conv input block.\n z = self.bn1(self.relu(self.Conv1x1(x4)))\n\n # If DCN32, forward pass through DC32 and DBN32 and return output.\n if self.variant == \"32\":\n z = self.dbn32(self.relu(self.DC32(z)))\n assert isinstance(z, Tensor)\n return z\n\n # Common Conv1x1 layer to DCN16 & DCN8.\n x3 = self.bn1(self.relu(self.Conv1x1_x3(x3)))\n z = self.dbn2(self.relu(self.DC2(z)))\n\n z = z + x3\n\n # If DCN16, forward pass through DCN16 and DBN16 and return output.\n if self.variant == \"16\":\n z = self.dbn16(self.relu(self.DC16(z)))\n assert isinstance(z, Tensor)\n return z\n\n # If DCN8, continue through remaining layers to output.\n else:\n x2 = self.bn1(self.relu(self.Conv1x1_x2(x2)))\n z = self.dbn4(self.relu(self.DC4(z)))\n\n z = z + x2\n\n z = self.dbn8(self.relu(self.DC8(z)))\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass FCN32ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"32\"\n\n\nclass FCN16ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"16\"\n\n\nclass FCN8ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = 
\"8\"\n\n\nclass FCN8ResNet101(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet101` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet101\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet152(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet152` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet152\"\n decoder_variant = \"8\"\n" + "text": "Path" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 54, "startColumn": 1, - "charOffset": 0, - "charLength": 16926, + "charOffset": 2581, + "charLength": 79, "snippet": { - "text": "# -*- coding: utf-8 -*-\n# Copyright (C) 2023 Harry Baker\n#\n# This program is free software: you can redistribute it and/or modify\n# it under the terms of the GNU Lesser General Public License as published by\n# the Free Software Foundation, either version 3 of the License, or\n# (at your option) any later version.\n#\n# This program is distributed in the hope that it will be useful,\n# but WITHOUT ANY WARRANTY; without even the implied warranty of\n# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the\n# GNU Lesser General Public License for more details.\n#\n# You should have received a copy of the GNU Lesser General Public License\n# along with this program in LICENSE.txt. If not,\n# see .\n#\n# @org: University of Southampton\n# Created under a project funded by the Ordnance Survey Ltd.\n#\n\"\"\"Module containing Fully Convolutional Network (FCN) models.\"\"\"\n# =====================================================================================================================\n# METADATA\n# =====================================================================================================================\n__author__ = \"Harry Baker\"\n__contact__ = \"hjb1d20@soton.ac.uk\"\n__license__ = \"GNU LGPLv3\"\n__copyright__ = \"Copyright (C) 2023 Harry Baker\"\n\n__all__ = [\n \"FCN\",\n \"DCN\",\n \"FCN8ResNet18\",\n \"FCN8ResNet34\",\n \"FCN8ResNet50\",\n \"FCN8ResNet101\",\n \"FCN8ResNet152\",\n \"FCN16ResNet18\",\n \"FCN16ResNet34\",\n \"FCN16ResNet50\",\n \"FCN32ResNet18\",\n \"FCN32ResNet34\",\n \"FCN32ResNet50\",\n]\n\n# =====================================================================================================================\n# IMPORTS\n# =====================================================================================================================\nfrom typing import Any, Dict, Literal, Optional, Sequence, Tuple\n\nimport torch\nimport torch.nn.modules as nn\nfrom torch import Tensor\n\nfrom .core import MinervaBackbone, MinervaModel, bilinear_init, get_model\n\n\n# =====================================================================================================================\n# CLASSES\n# =====================================================================================================================\nclass FCN(MinervaBackbone):\n \"\"\"Base Fully Convolutional Network (FCN) class to be subclassed by FCN variants described in the FCN paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Subclasses :class:`~models.MinervaModel`.\n\n Attributes:\n backbone_name (str): Optional; Name of the backbone within this module to use for the FCN.\n decoder_variant (str): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. 
See the FCN paper for details on these variants.\n backbone (~torch.nn.Module): Backbone of the FCN that takes the imagery input and\n extracts learned representations.\n decoder (~torch.nn.Module): Decoder that takes the learned representations from the backbone encoder\n and de-convolves to output a classification segmentation mask.\n\n Args:\n criterion: :mod:`torch` loss function model will use.\n input_size (tuple[int] | list[int]): Optional; Defines the shape of the input data in\n order of number of channels, image width, image height.\n n_classes (int): Optional; Number of classes in data to be classified.\n batch_size (int): Optional; Number of samples in each batch supplied to the network.\n Only needed for Decoder, not DCN.\n backbone_weight_path (str): Optional; Path to pre-trained weights for the backbone to be loaded.\n freeze_backbone (bool): Freezes the weights on the backbone to prevent end-to-end training\n if using a pre-trained backbone.\n backbone_kwargs (dict[str, ~typing.Any]): Optional; Keyword arguments for the backbone packed up into a dict.\n \"\"\"\n\n backbone_name: str = \"ResNet18\"\n decoder_variant: Literal[\"32\", \"16\", \"8\"] = \"32\"\n\n def __init__(\n self,\n criterion: Any,\n input_size: Tuple[int, ...] = (4, 256, 256),\n n_classes: int = 8,\n backbone_weight_path: Optional[str] = None,\n freeze_backbone: bool = False,\n backbone_kwargs: Dict[str, Any] = {},\n ) -> None:\n super(FCN, self).__init__(\n criterion=criterion, input_size=input_size, n_classes=n_classes\n )\n\n # Initialises the selected Minerva backbone.\n self.backbone: MinervaModel = get_model(self.backbone_name)(\n input_size=input_size, n_classes=n_classes, encoder=True, **backbone_kwargs # type: ignore\n )\n\n # Loads and graphts the pre-trained weights ontop of the backbone if the path is provided.\n if backbone_weight_path is not None: # pragma: no cover\n self.backbone.load_state_dict(torch.load(backbone_weight_path))\n\n # Freezes the weights of backbone to avoid end-to-end training.\n if freeze_backbone:\n self.backbone.requires_grad_(False)\n\n # Determines the output shape of the backbone so the correct input shape is known\n # for the proceeding layers of the network.\n self.backbone.determine_output_dim()\n\n backbone_out_shape = self.backbone.output_shape\n assert isinstance(backbone_out_shape, Sequence)\n self.decoder = DCN(\n in_channel=backbone_out_shape[0],\n n_classes=n_classes,\n variant=self.decoder_variant,\n )\n\n def forward(self, x: Tensor) -> Tensor:\n \"\"\"Performs a forward pass of the FCN by using the forward methods of the backbone and\n feeding its output into the forward for the decoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. ``model()``).\n\n Args:\n x (~torch.Tensor): Input data to network.\n\n Returns:\n ~torch.Tensor: segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n \"\"\"\n z = self.backbone(x)\n z = self.decoder(z)\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass DCN(MinervaModel):\n \"\"\"Generic DCN defined by the FCN paper. Can construct the DCN32, DCN16 or DCN8 variants defined in the paper.\n\n Based on the example found here: https://github.com/haoran1062/FCN-pytorch/blob/master/FCN.py\n\n Attributes:\n variant (~typing.Literal['32', '16', '8']): Defines which DCN variant this object is, altering the\n layers constructed and the computational graph. 
Will be either ``'32'``, ``'16'`` or ``'8'``.\n See the FCN paper for details on these variants.\n n_classes (int): Number of classes in dataset. Defines number of output classification channels.\n relu (~torch.nn.ReLU): Rectified Linear Unit (ReLU) activation layer to be used throughout the network.\n Conv1x1 (~torch.nn.Conv2d): First Conv1x1 layer acting as input to the network from the final output of\n the encoder and common to all variants.\n bn1 (~torch.nn.BatchNorm2d): First batch norm layer common to all variants that comes after Conv1x1.\n DC32 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 32 for DCN32 variant.\n dbn32 (~torch.nn.BatchNorm2d): Batch norm layer after DC32.\n Conv1x1_x3 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n third layer from the ResNet encoder.\n DC2 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN16 & DCN8 variants.\n dbn2 (~torch.nn.BatchNorm2d): Batch norm layer after DC2.\n DC16 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 16 for DCN16 variant.\n dbn16 (~torch.nn.BatchNorm2d): Batch norm layer after DC16.\n Conv1x1_x2 (~torch.nn.Conv2d): Conv1x1 layer acting as input to the network taking the output from the\n second layer from the ResNet encoder.\n DC4 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 2 for DCN8 variant.\n dbn4 (~torch.nn.BatchNorm2d): Batch norm layer after DC4.\n DC8 (~torch.nn.ConvTranspose2d): De-convolutional layer with stride 8 for DCN8 variant.\n dbn8 (~torch.nn.BatchNorm2d): Batch norm layer after DC8.\n\n Args:\n in_channel (int): Optional; Number of channels in the input layer of the network.\n Should match the number of output channels (likely feature maps) from the encoder.\n n_classes (int): Optional; Number of classes in dataset. Defines number of output classification channels.\n variant (~typing.Literal['32', '16', '8']): Optional; Flag for which DCN variant to construct.\n Must be either ``'32'``, ``'16'`` or ``'8'``. 
See the FCN paper for details on these variants.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n\n def __init__(\n self,\n in_channel: int = 512,\n n_classes: int = 21,\n variant: Literal[\"32\", \"16\", \"8\"] = \"32\",\n ) -> None:\n super(DCN, self).__init__(n_classes=n_classes)\n self.variant: Literal[\"32\", \"16\", \"8\"] = variant\n\n assert type(self.n_classes) is int\n\n # Common to all variants.\n self.relu = nn.ReLU(inplace=True)\n self.Conv1x1 = nn.Conv2d(in_channel, self.n_classes, kernel_size=(1, 1))\n self.bn1 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"32\":\n self.DC32 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(64, 64),\n stride=(32, 32),\n dilation=1,\n padding=(16, 16),\n )\n self.DC32.weight.data = bilinear_init(self.n_classes, self.n_classes, 64)\n self.dbn32 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant in (\"16\", \"8\"):\n self.Conv1x1_x3 = nn.Conv2d(\n int(in_channel / 2), self.n_classes, kernel_size=(1, 1)\n )\n self.DC2 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC2.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn2 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"16\":\n self.DC16 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(32, 32),\n stride=(16, 16),\n dilation=1,\n padding=(8, 8),\n )\n self.DC16.weight.data = bilinear_init(self.n_classes, self.n_classes, 32)\n self.dbn16 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant == \"8\":\n self.Conv1x1_x2 = nn.Conv2d(\n int(in_channel / 4), self.n_classes, kernel_size=(1, 1)\n )\n\n self.DC4 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(4, 4),\n stride=(2, 2),\n dilation=1,\n padding=(1, 1),\n )\n self.DC4.weight.data = bilinear_init(self.n_classes, self.n_classes, 4)\n self.dbn4 = nn.BatchNorm2d(self.n_classes)\n\n self.DC8 = nn.ConvTranspose2d(\n self.n_classes,\n self.n_classes,\n kernel_size=(16, 16),\n stride=(8, 8),\n dilation=1,\n padding=(4, 4),\n )\n self.DC8.weight.data = bilinear_init(self.n_classes, self.n_classes, 16)\n self.dbn8 = nn.BatchNorm2d(self.n_classes)\n\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n def forward(self, x: Tuple[Tensor, Tensor, Tensor, Tensor, Tensor]) -> Tensor:\n \"\"\"Performs a forward pass of the decoder. Depending on DCN variant, will take multiple inputs\n throughout pass from the encoder.\n\n Can be called directly as a method (e.g. ``model.forward()``)\n or when data is parsed to model (e.g. 
``model()``).\n\n Args:\n x (tuple[~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor, ~torch.Tensor]): Input data to network.\n Should be from a backbone that supports output at multiple points e.g ResNet.\n\n Returns:\n ~torch.Tensor: Segmentation mask with a channel for each class of the likelihoods the network places on\n each pixel input ``x`` being of that class.\n\n Raises:\n NotImplementedError: Raised if ``variant`` does not match known types.\n \"\"\"\n if self.variant not in (\"32\", \"16\", \"8\"):\n raise NotImplementedError(\n f\"Variant {self.variant} does not match known types\"\n )\n\n # Unpack outputs from the ResNet layers.\n x4, x3, x2, *_ = x\n\n # All DCNs have a common 1x1 Conv input block.\n z = self.bn1(self.relu(self.Conv1x1(x4)))\n\n # If DCN32, forward pass through DC32 and DBN32 and return output.\n if self.variant == \"32\":\n z = self.dbn32(self.relu(self.DC32(z)))\n assert isinstance(z, Tensor)\n return z\n\n # Common Conv1x1 layer to DCN16 & DCN8.\n x3 = self.bn1(self.relu(self.Conv1x1_x3(x3)))\n z = self.dbn2(self.relu(self.DC2(z)))\n\n z = z + x3\n\n # If DCN16, forward pass through DCN16 and DBN16 and return output.\n if self.variant == \"16\":\n z = self.dbn16(self.relu(self.DC16(z)))\n assert isinstance(z, Tensor)\n return z\n\n # If DCN8, continue through remaining layers to output.\n else:\n x2 = self.bn1(self.relu(self.Conv1x1_x2(x2)))\n z = self.dbn4(self.relu(self.DC4(z)))\n\n z = z + x2\n\n z = self.dbn8(self.relu(self.DC8(z)))\n\n assert isinstance(z, Tensor)\n return z\n\n\nclass FCN32ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"32\"\n\n\nclass FCN32ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN32`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"32\"\n\n\nclass FCN16ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"16\"\n\n\nclass FCN16ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN16`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = \"16\"\n\n\nclass FCN8ResNet18(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet18` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet18\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet34(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet34` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet34\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet50(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet50` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet50\"\n decoder_variant = 
\"8\"\n\n\nclass FCN8ResNet101(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet101` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet101\"\n decoder_variant = \"8\"\n\n\nclass FCN8ResNet152(FCN):\n \"\"\"\n Fully Convolutional Network (FCN) using a :class:`~models.resnet.ResNet152` backbone\n with a ``DCN8`` decoder.\n \"\"\"\n\n backbone_name = \"ResNet152\"\n decoder_variant = \"8\"\n" + "text": "import argparse\nimport os\nfrom pathlib import Path\nfrom typing import Optional\n" } } }, @@ -17077,7 +17480,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "090703aa81d71f6f8c363746aacb7d9642b4619f35aed70d57ff5e04fcfc92eb" + "equalIndicator/v1": "8fc9dbc1d61451bd323719a388c4ab2674b3b2221b0be5a03784cca7a66d19d5" }, "properties": { "ideaSeverity": "ERROR" @@ -17088,33 +17491,33 @@ "kind": "fail", "level": "error", "message": { - "text": "Unresolved reference 'setuptools'", - "markdown": "Unresolved reference 'setuptools'" + "text": "Unresolved reference 'int'", + "markdown": "Unresolved reference 'int'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "setup.py", + "uri": "scripts/Torch_to_ONNX.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 2, - "startColumn": 6, - "charOffset": 29, - "charLength": 10, + "startLine": 44, + "startColumn": 15, + "charOffset": 2062, + "charLength": 3, "snippet": { - "text": "setuptools" + "text": "int" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 42, "startColumn": 1, - "charOffset": 0, - "charLength": 80, + "charOffset": 1868, + "charLength": 321, "snippet": { - "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nif __name__ == \"__main__\":" + "text": "# MAIN\n# =====================================================================================================================\ndef main(gpu: int, args) -> None:\n trainer = Trainer(\n gpu=gpu, rank=args.rank, world_size=args.world_size, verbose=False, **CONFIG" } } }, @@ -17127,7 +17530,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "d50598d8ec65aa86e8a59e3be22a5ba63e63e851e673426eb9a1f0e1af89d446" + "equalIndicator/v1": "a7d4834e314e921b4c3d6f86b8e0c8884833214f176d9c6a5db708ed994d85dc" }, "properties": { "ideaSeverity": "ERROR" @@ -17138,33 +17541,33 @@ "kind": "fail", "level": "error", "message": { - "text": "Unresolved reference 'setup'", - "markdown": "Unresolved reference 'setup'" + "text": "Unresolved reference 'print'", + "markdown": "Unresolved reference 'print'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "setup.py", + "uri": "scripts/Torch_to_ONNX.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 2, - "startColumn": 24, - "charOffset": 47, + "startLine": 53, + "startColumn": 5, + "charOffset": 2340, "charLength": 5, "snippet": { - "text": "setup" + "text": "print" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 1, + "startLine": 51, "startColumn": 1, - "charOffset": 0, - "charLength": 80, + "charOffset": 2283, + "charLength": 123, "snippet": { - "text": "# -*- coding: utf-8 -*-\nfrom setuptools import setup\n\nif __name__ == \"__main__\":" + "text": " trainer.save_model(fn=weights_path, fmt=\"onnx\")\n\n print(f\"Model saved to --> {weights_path}.onnx\")\n\n if gpu == 0:" } } }, @@ -17177,7 +17580,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "f5d6a28123db521d34cd408d3c5dabb5b3c5aa7f4ac60f19ae394ae62a9262e9" + "equalIndicator/v1": 
"9872d6e1ea496f5145944216700fbe9aca4546a45b4cc0d6588d01f5c1dce4c5" }, "properties": { "ideaSeverity": "ERROR" @@ -17188,33 +17591,33 @@ "kind": "fail", "level": "error", "message": { - "text": "Unresolved reference 'pathlib'", - "markdown": "Unresolved reference 'pathlib'" + "text": "No module named 'argparse'", + "markdown": "No module named 'argparse'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/pytorchtools.py", + "uri": "scripts/MinervaPipe.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 37, - "startColumn": 6, - "charOffset": 2014, - "charLength": 7, + "startLine": 35, + "startColumn": 8, + "charOffset": 1678, + "charLength": 8, "snippet": { - "text": "pathlib" + "text": "argparse" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 35, + "startLine": 33, "startColumn": 1, - "charOffset": 1828, - "charLength": 251, + "charOffset": 1489, + "charLength": 226, "snippet": { - "text": "# IMPORTS\n# =====================================================================================================================\nfrom pathlib import Path\nfrom typing import Callable, Optional, Union\n" + "text": "# IMPORTS\n# =====================================================================================================================\nimport argparse\nimport subprocess\nimport sys" } } }, @@ -17227,7 +17630,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "c24a8bfa9719d2440246bec2b2b16932303d5e2d346b4570840972b2ca6a29d4" + "equalIndicator/v1": "443d16e9d3a4ece57f8b11e381ca09db7c4364fd9933121b99b22a36ba9a6ab0" }, "properties": { "ideaSeverity": "ERROR" @@ -17238,33 +17641,33 @@ "kind": "fail", "level": "error", "message": { - "text": "Unresolved reference 'Path'", - "markdown": "Unresolved reference 'Path'" + "text": "No module named 'subprocess'", + "markdown": "No module named 'subprocess'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/pytorchtools.py", + "uri": "scripts/MinervaPipe.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 37, - "startColumn": 21, - "charOffset": 2029, - "charLength": 4, + "startLine": 36, + "startColumn": 8, + "charOffset": 1694, + "charLength": 10, "snippet": { - "text": "Path" + "text": "subprocess" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 35, + "startLine": 34, "startColumn": 1, - "charOffset": 1828, - "charLength": 251, + "charOffset": 1551, + "charLength": 193, "snippet": { - "text": "# IMPORTS\n# =====================================================================================================================\nfrom pathlib import Path\nfrom typing import Callable, Optional, Union\n" + "text": "# =====================================================================================================================\nimport argparse\nimport subprocess\nimport sys\nfrom typing import Any, Dict" } } }, @@ -17277,7 +17680,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "9387920855972637b06107a6ef95c2063b3e4c775fb6d19d8a70df9e8b7bff42" + "equalIndicator/v1": "c51f800c67e9a46684513790049e7fdf6184d2d69864619855223950467ac3e0" }, "properties": { "ideaSeverity": "ERROR" @@ -17288,33 +17691,33 @@ "kind": "fail", "level": "error", "message": { - "text": "Unresolved reference 'typing'", - "markdown": "Unresolved reference 'typing'" + "text": "No module named 'sys'", + "markdown": "No module named 'sys'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/pytorchtools.py", + "uri": 
"scripts/MinervaPipe.py", "uriBaseId": "SRCROOT" }, "region": { - "startLine": 38, - "startColumn": 6, - "charOffset": 2039, - "charLength": 6, + "startLine": 37, + "startColumn": 8, + "charOffset": 1712, + "charLength": 3, "snippet": { - "text": "typing" + "text": "sys" }, "sourceLanguage": "Python" }, "contextRegion": { - "startLine": 36, + "startLine": 35, "startColumn": 1, - "charOffset": 1889, - "charLength": 209, + "charOffset": 1671, + "charLength": 74, "snippet": { - "text": "# =====================================================================================================================\nfrom pathlib import Path\nfrom typing import Callable, Optional, Union\n\nimport numpy as np" + "text": "import argparse\nimport subprocess\nimport sys\nfrom typing import Any, Dict\n" } } }, @@ -17327,7 +17730,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "2d06d47fbc69879ea614dede0a701b2afc765ed9464b7007d10d3e18ba4d2146" + "equalIndicator/v1": "0d4fc55ce662e21c7c1a885b0d94542a38640f3fa9ee65c7bbee7ba225c834d0" }, "properties": { "ideaSeverity": "ERROR" @@ -17338,33 +17741,33 @@ "kind": "fail", "level": "error", "message": { - "text": "Unresolved reference 'Callable'", - "markdown": "Unresolved reference 'Callable'" + "text": "Unresolved reference 'typing'", + "markdown": "Unresolved reference 'typing'" }, "locations": [ { "physicalLocation": { "artifactLocation": { - "uri": "minerva/pytorchtools.py", + "uri": "scripts/MinervaPipe.py", "uriBaseId": "SRCROOT" }, "region": { "startLine": 38, - "startColumn": 20, - "charOffset": 2053, - "charLength": 8, + "startColumn": 6, + "charOffset": 1721, + "charLength": 6, "snippet": { - "text": "Callable" + "text": "typing" }, "sourceLanguage": "Python" }, "contextRegion": { "startLine": 36, "startColumn": 1, - "charOffset": 1889, - "charLength": 209, + "charOffset": 1687, + "charLength": 70, "snippet": { - "text": "# =====================================================================================================================\nfrom pathlib import Path\nfrom typing import Callable, Optional, Union\n\nimport numpy as np" + "text": "import subprocess\nimport sys\nfrom typing import Any, Dict\n\nimport yaml" } } }, @@ -17377,7 +17780,7 @@ } ], "partialFingerprints": { - "equalIndicator/v1": "d8bda575416d1bd266907b85b22bba5f6645e48f18eadabb3b89c61c8b947fbd" + "equalIndicator/v1": "b512b5387135bfec81f7deb9ed5a5d9bb61a142a01729b3ac436fda1abeac346" }, "properties": { "ideaSeverity": "ERROR" @@ -17385,7 +17788,7 @@ } ], "configProfile": "absent", - "deviceId": "200820300000000-4dbb-3e66-9807-55718c8b8a29" + "deviceId": "200820300000000-aeb8-7dfb-2319-e05a933d2f21" } } ] diff --git a/scripts/Torch_to_ONNX.py b/scripts/Torch_to_ONNX.py index 287382b10..ba6770088 100644 --- a/scripts/Torch_to_ONNX.py +++ b/scripts/Torch_to_ONNX.py @@ -48,7 +48,7 @@ def main(gpu: int, args) -> None: weights_path = universal_path(CONFIG["dir"]["cache"]) / CONFIG["pre_train_name"] - trainer.save_model(fn=weights_path, format="onnx") + trainer.save_model(fn=weights_path, fmt="onnx") print(f"Model saved to --> {weights_path}.onnx") diff --git a/setup.cfg b/setup.cfg index afafb01da..585b616bc 100644 --- a/setup.cfg +++ b/setup.cfg @@ -15,7 +15,7 @@ classifiers = Programming Language :: Python :: 3.9 Programming Language :: Python :: 3.8 Programming Language :: Python :: 3 :: Only - License :: OSI Approved :: GNU Lesser General Public License v3 (GPLv3) + License :: OSI Approved :: MIT License Development Status :: 4 - Beta Operating System :: POSIX :: Linux Natural 
Language :: English
diff --git a/tests/test_datasets.py b/tests/test_datasets.py
index 266664ef6..97a3fd9e8 100644
--- a/tests/test_datasets.py
+++ b/tests/test_datasets.py
@@ -76,8 +76,8 @@ def test_make_bounding_box() -> None:
 
 def test_tinydataset(img_root: Path, lc_root: Path) -> None:
     """Source of TIFF: https://github.com/mommermi/geotiff_sample"""
-    imagery = TstImgDataset(img_root)
-    labels = TstMaskDataset(lc_root)
+    imagery = TstImgDataset(str(img_root))
+    labels = TstMaskDataset(str(lc_root))
 
     dataset = imagery & labels
     assert isinstance(dataset, IntersectionDataset)
@@ -85,13 +85,13 @@ def test_tinydataset(img_root: Path, lc_root: Path) -> None:
 
 def test_paired_datasets(img_root: Path) -> None:
     dataset = PairedDataset(TstImgDataset, img_root)
-    dataset2 = TstImgDataset(img_root)
+    dataset2 = TstImgDataset(str(img_root))
 
     with pytest.raises(
         ValueError,
         match=f"Intersecting a dataset of {type(dataset2)} and a PairedDataset is not supported!",
     ):
-        _ = dataset & dataset2
+        _ = dataset & dataset2  # type: ignore[operator]
 
     bounds = BoundingBox(411248.0, 412484.0, 4058102.0, 4059399.0, 0, 1e12)
     query_1 = get_random_bounding_box(bounds, (32, 32), 10.0)
@@ -113,7 +113,7 @@ def test_paired_datasets(img_root: Path) -> None:
 
     assert type(dataset.__repr__()) == str
     assert isinstance(
-        dataset.plot_random_sample((32, 32), 1.0, suptitle="test"), plt.Figure
+        dataset.plot_random_sample((32, 32), 1.0, suptitle="test"), plt.Figure  # type: ignore[attr-defined]
     )
 
 
@@ -128,8 +128,8 @@ def dataset_test(_dataset) -> None:
 
     bounds = BoundingBox(411248.0, 412484.0, 4058102.0, 4059399.0, 0, 1e12)
 
-    dataset1 = TstImgDataset(img_root)
-    dataset2 = TstImgDataset(img_root)
+    dataset1 = TstImgDataset(str(img_root))
+    dataset2 = TstImgDataset(str(img_root))
     dataset3 = PairedDataset(TstImgDataset, img_root)
     dataset4 = PairedDataset(TstImgDataset, img_root)
 
@@ -137,7 +137,7 @@ def dataset_test(_dataset) -> None:
         ValueError,
         match=f"Unionising a dataset of {type(dataset2)} and a PairedDataset is not supported!",
     ):
-        _ = dataset3 | dataset2
+        _ = dataset3 | dataset2  # type: ignore[operator]
 
     union_dataset1 = PairedUnionDataset(dataset1, dataset2)
     union_dataset2 = dataset3 | dataset4
diff --git a/tests/test_trainer.py b/tests/test_trainer.py
index 1d5edb0d4..1b28012c7 100644
--- a/tests/test_trainer.py
+++ b/tests/test_trainer.py
@@ -100,7 +100,7 @@ def test_trainer_2() -> None:
     trainer1 = Trainer(0, **params1)
 
     with pytest.raises(ValueError):
-        trainer1.save_model(format="unkown")
+        trainer1.save_model(fmt="unknown")
 
     suffix = "onnx"
     try:
@@ -108,8 +108,7 @@ def test_trainer_2() -> None:
     except ValueError:
         suffix = "pt"
 
-    trainer1.save_model(fn=trainer1.get_model_cache_path(), format=suffix)
-
+    trainer1.save_model(fn=trainer1.get_model_cache_path(), fmt=suffix)
     params2 = CONFIG.copy()
     params2["pre_train_name"] = f"{params1['model_name'].split('-')[0]}.{suffix}"
     params2["sample_pairs"] = "false"
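For orientation on the largest inspection entries above: they quote `minerva/models/fcn.py` in full, where each `FCN*ResNet*` class pairs a ResNet encoder with a DCN32, DCN16 or DCN8 decoder. A hypothetical smoke test of one variant, using only the defaults visible in the quoted constructor (the criterion is an arbitrary stand-in, since `FCN` only stores it for training):

```python
# Hypothetical smoke test of a variant from the quoted fcn.py source.
# Constructor defaults from that source: input_size=(4, 256, 256), n_classes=8.
import torch
from torch.nn import CrossEntropyLoss

from minerva.models.fcn import FCN8ResNet18

model = FCN8ResNet18(
    criterion=CrossEntropyLoss(), input_size=(4, 256, 256), n_classes=8
)

x = torch.rand(2, 4, 256, 256)  # Two 4-channel, 256x256 patches.
mask = model(x)

# Per the FCN paper's stride arithmetic, the DCN8 decoder should deconvolve
# the stride-32 backbone features back to input resolution, with one output
# channel per class.
assert mask.shape == (2, 8, 256, 256)
```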
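The quoted `scripts/Torch_to_ONNX.py` also illustrates the entry-point convention the scripts share: inherit `runner.GENERIC_PARSER`, add any script-specific flags at the marked slot, call `runner.config_args`, then hand `main` to `runner.distributed_run`. A minimal sketch of that pattern; the `--opset` flag is hypothetical and only fills the 'ADD EXTRA ARGS' slot for illustration:

```python
# Minimal sketch of the script entry-point pattern seen in Torch_to_ONNX.py.
# `--opset` is a hypothetical extra flag, not part of the real script.
import argparse

from minerva.utils import runner


def main(gpu: int, args) -> None:
    # `gpu` and the rank/world-size attributes on `args` are supplied by the
    # runner, matching the signature of the quoted Torch_to_ONNX.main.
    print(f"Device {gpu}: exporting with opset {args.opset}")


if __name__ == "__main__":
    parser = argparse.ArgumentParser(parents=[runner.GENERIC_PARSER], add_help=False)
    parser.add_argument("--opset", type=int, default=17)

    # Export args from the CLI.
    cli_args = parser.parse_args()

    # Configure the arguments and environment variables, then launch `main`
    # under the distributed runner, exactly as the quoted script does.
    runner.config_args(cli_args)
    runner.distributed_run(main, cli_args)
```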
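Finally, every `save_model` call site in this patch moves from the builtin-shadowing `format` keyword to `fmt`, and `tests/test_trainer.py` pins down the error behaviour: an unrecognised format string raises `ValueError`. A minimal usage sketch, assuming a config has been loaded as in the quoted notebook (the filename is a placeholder):

```python
# Minimal sketch of the renamed keyword; CONFIG is assumed to be a loaded
# example config, as in the quoted notebook and Torch_to_ONNX.py.
from minerva.trainer import Trainer
from minerva.utils import CONFIG

trainer = Trainer(gpu=0, verbose=False, **CONFIG)

try:
    # `fmt` replaces the old `format` keyword, which shadowed the builtin.
    trainer.save_model(fn="cache/pre_trained", fmt="onnx")
except ValueError:
    # An unrecognised (or unavailable) format raises ValueError; the tests
    # fall back to a plain torch ".pt" save in the same way.
    trainer.save_model(fn="cache/pre_trained", fmt="pt")
```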