From 6934232e1807f34a04f65b57ab460c76008b36e2 Mon Sep 17 00:00:00 2001
From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com>
Date: Wed, 18 Sep 2024 11:37:17 -0400
Subject: [PATCH 01/18] examples: adapt tensorflow-mnist-classifier demo to dioptra 1.0

examples: working training-only example with v1client
examples: working example of mnist-classifier with new client
examples: update notebook for documentation and slight cleanliness
examples: re-arrange cells & add mlflowrun capability to client
examples: add metric pull from mlflow for demo
examples: update demo & epochs parameter default
examples: add UUIDs to demo
examples: moving mnist classifier to new folder and cleaning up notebook
examples: updated training YML to be more condensed and understandable
examples: cleaning up demo example
examples: fix metrics in demo example
---
 examples/mnist-classifier-demo/README.md      |   22 +
 examples/mnist-classifier-demo/demo.ipynb     |  422 +++++
 examples/mnist-classifier-demo/src/fgm.yml    |  215 ++++
 examples/mnist-classifier-demo/src/infer.yml  |  123 ++
 examples/mnist-classifier-demo/src/train.yml  |  203 +++
 examples/scripts/client.py                    | 1120 ++++++++++++++---
 examples/scripts/setup.py                     |  251 ++++
 .../fgm_mnist_demo/artifacts_exceptions.py    |   23 +
 .../fgm_mnist_demo/artifacts_mlflow.py        |  225 ++++
 .../fgm_mnist_demo/artifacts_restapi.py       |  169 +++
 .../fgm_mnist_demo/artifacts_utils.py         |  117 ++
 .../fgm_mnist_demo/attacks_fgm.py             |  295 +++++
 .../backend_configs_tensorflow.py             |   52 +
 .../fgm_mnist_demo/data_tensorflow.py         |  130 ++
 .../estimators_keras_classifiers.py           |  230 ++++
 .../fgm_mnist_demo/estimators_methods.py      |  122 ++
 .../fgm_mnist_demo/import_keras.py            |   65 +
 .../fgm_mnist_demo/metrics_distance.py        |  307 +++++
 .../fgm_mnist_demo/metrics_exceptions.py      |   27 +
 .../dioptra_custom/fgm_mnist_demo/mlflow.py   |  103 ++
 .../dioptra_custom/fgm_mnist_demo/plugins.py  |  244 ++++
 .../fgm_mnist_demo/random_rng.py              |   56 +
 .../fgm_mnist_demo/random_sample.py           |   89 ++
 .../fgm_mnist_demo/registry_art.py            |  107 ++
 .../fgm_mnist_demo/registry_mlflow.py         |  120 ++
 .../fgm_mnist_demo/tensorflow.py              |   84 ++
 .../fgm_mnist_demo/tracking_mlflow.py         |   99 ++
 .../dioptra_custom/vc/artifacts_exceptions.py |   23 +
 .../dioptra_custom/vc/artifacts_mlflow.py     |  241 ++++
 .../dioptra_custom/vc/artifacts_restapi.py    |  151 +++
 .../dioptra_custom/vc/artifacts_utils.py      |  117 ++
 .../dioptra_custom/vc/attacks_fgm.py          |  305 +++++
 .../vc/backend_configs_tensorflow.py          |   52 +
 .../task-plugins/dioptra_custom/vc/builtin.py |  208 +++
 .../dioptra_custom/vc/data_tensorflow.py      |  128 ++
 .../vc/defenses_image_preprocessing.py        |  201 +++
 .../vc/estimators_keras_classifiers.py        |  231 ++++
 .../dioptra_custom/vc/estimators_methods.py   |  122 ++
 .../dioptra_custom/vc/import_keras.py         |   65 +
 .../dioptra_custom/vc/metrics_distance.py     |  307 +++++
 .../dioptra_custom/vc/metrics_exceptions.py   |   27 +
 .../task-plugins/dioptra_custom/vc/mlflow.py  |  103 ++
 .../dioptra_custom/vc/random_rng.py           |   56 +
 .../dioptra_custom/vc/random_sample.py        |   89 ++
 .../dioptra_custom/vc/registry_art.py         |  107 ++
 .../dioptra_custom/vc/registry_mlflow.py      |  120 ++
 .../dioptra_custom/vc/tensorflow.py           |  112 ++
 .../dioptra_custom/vc/tracking_mlflow.py      |   99 ++
 .../README.md                                 |   22 +
 .../demo.ipynb                                |  669 ++++++++++
 .../src/fgm.yml                               |  320 +++++
 .../src/infer.yml                             |  255 ++++
 .../src/train.yml                             |  371 ++++++
 53 files changed, 9362 insertions(+), 159 deletions(-)
 create mode 100644 examples/mnist-classifier-demo/README.md
 create mode 100644 examples/mnist-classifier-demo/demo.ipynb
 create mode 100644 examples/mnist-classifier-demo/src/fgm.yml
 create mode 100644 examples/mnist-classifier-demo/src/infer.yml
 create mode 100644 examples/mnist-classifier-demo/src/train.yml
 create mode 100644 examples/scripts/setup.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_exceptions.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_restapi.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_utils.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/backend_configs_tensorflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_keras_classifiers.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_methods.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/import_keras.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_distance.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_exceptions.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/mlflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_rng.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_sample.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_mlflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/tracking_mlflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_utils.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/attacks_fgm.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/builtin.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/data_tensorflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/defenses_image_preprocessing.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/estimators_methods.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/import_keras.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/metrics_distance.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/mlflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/random_rng.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/random_sample.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/registry_art.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/registry_mlflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/tensorflow.py
 create mode 100644 examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py
 create mode 100644 examples/v1-client-tensorflow-mnist-classifier/README.md
 create mode 100644 examples/v1-client-tensorflow-mnist-classifier/demo.ipynb
 create mode 100644 examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml
 create mode 100644 examples/v1-client-tensorflow-mnist-classifier/src/infer.yml
 create mode 100644 examples/v1-client-tensorflow-mnist-classifier/src/train.yml

diff --git a/examples/mnist-classifier-demo/README.md b/examples/mnist-classifier-demo/README.md
new file mode 100644
index 000000000..55340bbdc
--- /dev/null
+++ b/examples/mnist-classifier-demo/README.md
@@ -0,0 +1,22 @@
+# Tensorflow MNIST Classifier demo
+
+This example demonstrates how to run a simple experiment applying the fast gradient method (FGM) evasion attack to a neural network classifier trained on MNIST.
+The demo can be found in the Jupyter notebook file [demo.ipynb](demo.ipynb).
+
+## Running the example
+
+To prepare your environment for running this example, follow the linked instructions below:
+
+1. [Create and activate a Python virtual environment and install the necessary dependencies](../README.md#creating-a-virtual-environment)
+2. [Download the MNIST dataset using the download_data.py script.](../README.md#downloading-datasets)
+3. [Follow the links in these User Setup instructions](../../README.md#user-setup) to do the following:
+   - Build the containers
+   - Use the cookiecutter template to generate the scripts, configuration files, and Docker Compose files you will need to run Dioptra
+4. [Edit the docker-compose.yml file to mount the data folder in the worker containers](../README.md#mounting-the-data-folder-in-the-worker-containers)
+5. [Initialize and start Dioptra](https://pages.nist.gov/dioptra/getting-started/running-dioptra.html#initializing-the-deployment)
+6. [Register the custom task plugins for Dioptra's examples and demos](../README.md#registering-custom-task-plugins)
+7. [Register the queues for Dioptra's examples and demos](../README.md#registering-queues)
+8. [Start JupyterLab and open `demo.ipynb`](../README.md#starting-jupyter-lab)
+
+Steps 1–4 and 6–7 only need to be run once.
+**Returning users only need to repeat Step 5 (if you stopped Dioptra using `docker compose down`) and Step 8 (if you stopped the `jupyter lab` process)**.

diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb
new file mode 100644
index 000000000..12e70a854
--- /dev/null
+++ b/examples/mnist-classifier-demo/demo.ipynb
@@ -0,0 +1,422 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Tensorflow MNIST Classifier demo"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This notebook contains an end-to-end demonstration of Dioptra that can be run on any modern laptop.\n",
+    "Please see the [example README](README.md) for instructions on how to prepare your environment for running this example."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Below we import the necessary Python modules and ensure the proper environment variables are set so that all the code blocks will work as expected."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "EXPERIMENT_NAME = \"mnist_fgm\"\n",
+    "EXPERIMENT_DESC = \"applying the fast gradient method (FGM) attack to a classifier trained on MNIST\"\n",
+    "QUEUE_NAME = 'tensorflow_cpu'\n",
+    "QUEUE_DESC = 'Tensorflow CPU Queue'\n",
+    "PLUGIN_FILES = '../task-plugins/dioptra_custom/fgm_mnist_demo/'\n",
+    "MODEL_NAME = \"mnist_classifier\"\n",
+    "\n",
+    "# Default address for accessing the RESTful API service\n",
+    "RESTAPI_ADDRESS = \"http://localhost:20080\"\n",
+    "\n",
+    "# Default address for accessing the MLFlow Tracking server\n",
+    "MLFLOW_TRACKING_URI = \"http://localhost:35000\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import packages from the Python standard library\n",
+    "import importlib.util\n",
+    "import os\n",
+    "import sys\n",
+    "import pprint\n",
+    "import time\n",
+    "import warnings\n",
+    "from IPython.display import display, clear_output\n",
+    "import logging\n",
+    "import structlog\n",
+    "from pathlib import Path\n",
+    "\n",
+    "# Filter out warning messages\n",
+    "warnings.filterwarnings(\"ignore\")\n",
+    "structlog.configure(\n",
+    "    wrapper_class=structlog.make_filtering_bound_logger(logging.ERROR),\n",
+    ")\n",
+    "\n",
+    "def register_python_source_file(module_name: str, filepath: Path) -> None:\n",
+    "    \"\"\"Import a source file directly.\n",
+    "\n",
+    "    Args:\n",
+    "        module_name: The module name to associate with the imported source file.\n",
+    "        filepath: The path to the source file.\n",
+    "\n",
+    "    Notes:\n",
+    "        Adapted from the following implementation in the Python documentation:\n",
+    "        https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n",
+    "    \"\"\"\n",
+    "    spec = importlib.util.spec_from_file_location(module_name, str(filepath))\n",
+    "    module = importlib.util.module_from_spec(spec)\n",
+    "    sys.modules[module_name] = module\n",
+    "    spec.loader.exec_module(module)\n",
+    "\n",
+    "# Register the examples/scripts directory as a Python module\n",
+    "register_python_source_file(\"scripts\", Path(\"..\", \"scripts\", \"__init__.py\"))\n",
+    "\n",
+    "from scripts.client import DioptraClient\n",
+    "from scripts.utils import make_tar\n",
+    "from scripts.setup import upload_experiment, run_experiment, delete_all\n",
+    "\n",
+    "# Set DIOPTRA_RESTAPI_URI variable if not defined, used to connect to RESTful API service\n",
+    "if os.getenv(\"DIOPTRA_RESTAPI_URI\") is None:\n",
+    "    os.environ[\"DIOPTRA_RESTAPI_URI\"] = RESTAPI_ADDRESS\n",
+    "\n",
+    "# Set MLFLOW_TRACKING_URI variable, used to connect to MLFlow Tracking service\n",
+    "if os.getenv(\"MLFLOW_TRACKING_URI\") is None:\n",
+    "    os.environ[\"MLFLOW_TRACKING_URI\"] = MLFLOW_TRACKING_URI"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Dataset"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We obtained a copy of the MNIST dataset when we ran the `download_data.py` script. ",
+    "If you have not done so already, see [How to Obtain Common Datasets](https://pages.nist.gov/dioptra/getting-started/acquiring-datasets.html).\n",
+    "The training and testing images for the MNIST dataset are stored within the `/dioptra/data/Mnist` directory as PNG files that are organized into the following folder structure:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "    Mnist\n",
+    "    ├── testing\n",
+    "    │   ├── 0\n",
+    "    │   ├── 1\n",
+    "    │   ├── 2\n",
+    "    │   ├── 3\n",
+    "    │   ├── 4\n",
+    "    │   ├── 5\n",
+    "    │   ├── 6\n",
+    "    │   ├── 7\n",
+    "    │   ├── 8\n",
+    "    │   └── 9\n",
+    "    └── training\n",
+    "        ├── 0\n",
+    "        ├── 1\n",
+    "        ├── 2\n",
+    "        ├── 3\n",
+    "        ├── 4\n",
+    "        ├── 5\n",
+    "        ├── 6\n",
+    "        ├── 7\n",
+    "        ├── 8\n",
+    "        └── 9"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The subfolders under `training/` and `testing/` are the classification labels for the images in the dataset.\n",
+    "This folder structure is a standardized way to encode the label information, and many libraries can make use of it, including the Tensorflow library that we are using for this particular demo."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Submit and run jobs"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To connect to the Dioptra RESTful API, we use the client class defined in the `examples/scripts/client.py` file, which communicates with the API over HTTP.\n",
+    "We instantiate the client below.\n",
+    "The client uses the environment variable `DIOPTRA_RESTAPI_URI`, which we configured at the top of the notebook, to determine how to connect to the Dioptra RESTful API."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "client = DioptraClient()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We must log in to the REST API before we can perform any operations. Here we create a user if one does not already exist, and then log in as that user."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "try:\n",
+    "    client.users.create('pluginuser','pluginuser@dioptra.nccoe.nist.gov','pleasemakesuretoPLUGINthecomputer','pleasemakesuretoPLUGINthecomputer')\n",
+    "except Exception:\n",
+    "    pass  # ignore the error if the user already exists\n",
+    "client.auth.login('pluginuser','pleasemakesuretoPLUGINthecomputer')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`wait_for_job` blocks until the given job has finished, which is useful for jobs that depend on the output of other jobs."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def wait_for_job(job, job_name):\n",
+    "    n = 0\n",
+    "    while job['status'] != 'finished':\n",
+    "        job = client.jobs.get_by_id(job['id'])\n",
+    "        time.sleep(1)\n",
+    "        clear_output(wait=True)\n",
+    "        display(\"Waiting for job.\" + \".\" * (n % 3))\n",
+    "        n += 1\n",
+    "    clear_output(wait=True)\n",
+    "    display(f\"Job finished. Starting {job_name} job.\")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "In this step, we upload all of our entrypoints and the plugins they rely on to the Dioptra server.\n",
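+    "\n",
+    "Under the hood, the `upload_experiment` helper registers the queue, the plugin files, the entrypoint (built from the YAML task graph), and the experiment itself, returning their ids.\n",
+    "As a rough, hypothetical sketch of the equivalent raw client calls (the `create` signatures below come from `examples/scripts/client.py`; `group_id` and `task_graph_yaml` are assumed placeholders):\n",
+    "\n",
+    "```python\n",
+    "# Hypothetical sketch only -- upload_experiment in examples/scripts/setup.py does this (and more) for us.\n",
+    "queue = client.queues.create(group_id, QUEUE_NAME, QUEUE_DESC)\n",
+    "entrypoint = client.entrypoints.create(group_id, 'train', 'training a classifier on MNIST',\n",
+    "                                       task_graph_yaml, parameters=[], queues=[queue['id']], plugins=[])\n",
+    "experiment = client.experiments.create(group_id, EXPERIMENT_NAME, EXPERIMENT_DESC, [entrypoint['id']])\n",
+    "```"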
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 63,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "#delete_all(client)\n",
+    "experiment_id, train_ep, queue_id = upload_experiment(client, 'src/train.yml','train','training a classifier on MNIST', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n",
+    "experiment_id, fgm_ep, queue_id = upload_experiment(client, 'src/fgm.yml','fgm','generating examples on mnist_classifier using the fgm attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n",
+    "experiment_id, infer_ep, queue_id = upload_experiment(client, 'src/infer.yml','infer','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we need to train our model. This particular entrypoint uses a LeNet-5 model.\n",
+    "Depending on the specs of your computer, training can take 5–20 minutes or longer to complete.\n",
+    "If you are fortunate enough to have access to a dedicated GPU, then the training time will be much shorter."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 64,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "job_time_limit = '1h'\n",
+    "\n",
+    "training_job = client.experiments.create_jobs_by_experiment_id(\n",
+    "    experiment_id,\n",
+    "    f\"training job for {experiment_id}\",\n",
+    "    queue_id,\n",
+    "    train_ep,\n",
+    "    {\"epochs\": \"1\"},\n",
+    "    job_time_limit\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have trained a model, we will apply the fast gradient method (FGM) evasion attack to it to generate adversarial images.\n",
+    "\n",
+    "This workflow is an example of jobs that contain dependencies: the metric evaluation job cannot start until the adversarial image generation job has completed, and the adversarial image generation job cannot start until the training job has completed.\n",
+    "\n",
+    "Note that we wait for the training job to finish so that the trained model has been registered; the FGM entrypoint then loads the latest version of that model by its registered name."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 65,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
+       "'Job finished. Starting fgm job.'"
+      ]
+     },
+     "metadata": {},
+     "output_type": "display_data"
+    }
+   ],
+   "source": [
+    "job_time_limit = '1h'\n",
+    "\n",
+    "wait_for_job(training_job, 'fgm')\n",
+    "fgm_job = client.experiments.create_jobs_by_experiment_id(\n",
+    "    experiment_id,\n",
+    "    f\"fgm job for {experiment_id}\",\n",
+    "    queue_id,\n",
+    "    fgm_ep,\n",
+    "    {\"model_name\": MODEL_NAME, \"model_version\": str(-1)},  # -1 means get the latest\n",
+    "    job_time_limit\n",
+    ")"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Finally, we can test the results of our adversarial attack on the model we trained earlier. This will wait for the FGM job to finish and then evaluate the model's performance on the adversarial examples. Note that we must tell this entrypoint which run's adversarial examples to load (via the `fgm_job` id) and which model to evaluate (via its registered name and version).\n",
+    "\n",
+    "The previous runs are all stored in Dioptra as well, so you can always go back later and retrieve examples, models, and even the code used to create them."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": 66,
+   "metadata": {},
+   "outputs": [
+    {
+     "data": {
+      "text/plain": [
Starting infer job.'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "job_time_limit = '1h'\n", + "wait_for_job(fgm_job, 'infer')\n", + "infer_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id,\n", + " f\"infer job for {experiment_id}\",\n", + " queue_id,\n", + " infer_ep,\n", + " {\"fgm_job_id\": str(fgm_job['id']), \"model_name\": MODEL_NAME, \"model_version\": str(-1)},\n", + " job_time_limit\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 70, + "metadata": {}, + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "Training accuracy:\n", + "{'accuracy': 0.9760833382606506,\n", + " 'auc': 0.9990718364715576,\n", + " 'loss': 0.07672422379255295,\n", + " 'precision': 0.9799415469169617,\n", + " 'recall': 0.9721999764442444,\n", + " 'training_time_in_minutes': 0.3090300166666667}\n", + "FGM accuracy:\n", + "{'accuracy': 0.16326121985912323,\n", + " 'auc': 0.6759902238845825,\n", + " 'loss': 2.7856907844543457,\n", + " 'precision': 0.09174499660730362,\n", + " 'recall': 0.044971954077482224}\n" + ] + } + ], + "source": [ + "from mlflow.tracking import MlflowClient\n", + "from uuid import UUID\n", + "mlflow_client = MlflowClient()\n", + "mlflow_runid = UUID(client.jobs.get_mlflow_run_id(training_job['id'])['mlflowRunId']).hex\n", + "mlflow_run = mlflow_client.get_run(mlflow_runid)\n", + "print(\"Training metrics:\")\n", + "pprint.pprint(mlflow_run.data.metrics)\n", + " \n", + "mlflow_runid = UUID(client.jobs.get_mlflow_run_id(infer_job['id'])['mlflowRunId']).hex\n", + "mlflow_run = mlflow_client.get_run(mlflow_runid)\n", + "print(\"FGM metrics:\")\n", + "pprint.pprint(mlflow_run.data.metrics)" + ] + } + ], + "metadata": { + "interpreter": { + "hash": "edee40310913f16e2ca02c1d37887bcb7f07f00399ca119bb7e27de7d632ea99" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/mnist-classifier-demo/src/fgm.yml b/examples/mnist-classifier-demo/src/fgm.yml new file mode 100644 index 000000000..66fb7fa15 --- /dev/null +++ b/examples/mnist-classifier-demo/src/fgm.yml @@ -0,0 +1,215 @@ +types: + path: + classifier: + artifact: + model_list: + list: classifier + artifact_list: + list: artifact + path_string: + union: [string, path] + kwargs: + mapping: [string, any] + kwargs_null: + union: [kwargs, "null"] + distance_metric_request: + mapping: [string, string] + distance_metrics_requests: + list: distance_metric_request + image_size: + tuple: [integer, integer, integer] + clip_values: + tuple: [number, number, number] + norm: + union: [integer, number, string] + str_null: + union: [string, "null"] + list_str_null: + list: str_null + num_null: + union: [number, "null"] + directory_iterator: + name_parameters: + mapping: + name: string + parameters: + mapping: [string, any] + metrics_list: + list: name_parameters + +parameters: + data_dir: /dioptra/data/Mnist/testing + image_size: [28, 28, 1] + adv_tar_name: testing_adversarial_fgm.tar.gz + adv_data_dir: adv_testing + distance_metrics_filename: distance_metrics.csv + model_name: mnist_classifier + model_version: -1 + clip_values: [0, 1] + batch_size: 32 + eps: 0.3 + 
+  eps_step: 0.1
+  minimal: false
+  norm: "inf"
+  seed: -1
+
+tasks:
+  load_dataset:
+    plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset
+    inputs:
+      - name: ep_seed
+        type: integer
+        required: false
+      - name: data_dir
+        type: string
+        required: false
+      - name: subsets
+        type: list_str_null
+        required: false
+      - name: image_size
+        type: image_size
+        required: false
+      - name: rescale
+        type: number
+        required: false
+      - name: validation_split
+        type: num_null
+        required: false
+      - name: batch_size
+        type: integer
+        required: false
+      - name: label_mode
+        type: string
+        required: false
+      - name: shuffle
+        type: boolean
+        required: false
+    outputs:
+      - training: directory_iterator
+      - validation: directory_iterator
+      - testing: directory_iterator
+
+  load_model:
+    plugin: dioptra_custom.fgm_mnist_demo.plugins.load_model
+    inputs:
+      - name: model_name
+        type: string
+        required: false
+      - name: model_version
+        type: integer
+        required: false
+      - name: imagenet_preprocessing
+        type: boolean
+        required: false
+      - name: art
+        type: boolean
+        required: false
+      - name: classifier_kwargs
+        type: kwargs
+        required: false
+    outputs:
+      classifier: classifier
+
+  attack:
+    plugin: dioptra_custom.fgm_mnist_demo.plugins.attack
+    inputs:
+      - dataset: any
+      - data_dir: string
+      - adv_data_dir: path_string
+      - classifier: any
+      - image_size: image_size
+      - distance_metrics: distance_metrics_requests
+      - name: rescale
+        type: number
+        required: false
+      - name: batch_size
+        type: integer
+        required: false
+      - name: label_mode
+        type: string
+        required: false
+      - name: eps
+        type: number
+        required: false
+      - name: eps_step
+        type: number
+        required: false
+      - name: minimal
+        type: boolean
+        required: false
+      - name: norm
+        type: norm
+        required: false
+      - name: file_format_kwargs
+        type: kwargs_null
+        required: false
+    outputs:
+      ret: artifact
+  save_artifacts_and_models:
+    plugin: dioptra_custom.fgm_mnist_demo.plugins.save_artifacts_and_models
+    inputs:
+      - name: artifacts
+        type: artifact_list
+        required: false
+      - name: models
+        type: model_list
+        required: false
+
+graph:
+  dataset:
+    load_dataset:
+      ep_seed: $seed
+      data_dir: $data_dir
+      subsets: [testing]
+      image_size: $image_size
+      batch_size: $batch_size
+
+  model:
+    load_model:
+      model_name: $model_name
+      model_version: $model_version
+      art: true
+      classifier_kwargs:
+        clip_values: $clip_values
+
+
+  fgm:
+    attack:
+      dataset: $dataset.testing
+      data_dir: $data_dir
+      adv_data_dir: $adv_data_dir
+      classifier: $model
+      image_size: $image_size
+      batch_size: $batch_size
+      eps: $eps
+      eps_step: $eps_step
+      minimal: $minimal
+      norm: $norm
+      distance_metrics:
+        - name: l_infinity_norm
+          func: l_inf_norm
+        - name: l_1_norm
+          func: l_1_norm
+        - name: l_2_norm
+          func: l_2_norm
+        - name: cosine_similarity
+          func: paired_cosine_similarities
+        - name: euclidean_distance
+          func: paired_euclidean_distances
+        - name: manhattan_distance
+          func: paired_manhattan_distances
+        - name: wasserstein_distance
+          func: paired_wasserstein_distances
+
+  save:
+    save_artifacts_and_models:
+      artifacts:
+        - type: tarball
+          adv_data_dir: $adv_data_dir
+          adv_tar_name: $adv_tar_name
+        - type: dataframe
+          data_frame: $fgm
+          file_name: $distance_metrics_filename
+          file_format: csv.gz
+          file_format_kwargs:
+            index: false
\ No newline at end of file

diff --git a/examples/mnist-classifier-demo/src/infer.yml b/examples/mnist-classifier-demo/src/infer.yml
new file mode 100644
index 000000000..8e2071edc
--- /dev/null
+++ b/examples/mnist-classifier-demo/src/infer.yml
@@ -0,0 +1,123 @@
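+# Inference entrypoint: load the adversarial images produced by a previous FGM job
+# and evaluate the registered classifier's metrics on them.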
+types: + path: + classifier: + num_null: + union: [number, "null"] + path_string: + union: [string, path] + list_path_string: + list: path_string + str_null: + union: [string, "null"] + list_str_null: + list: str_null + directory_iterator: + kwargs: + mapping: [string, any] + image_size: + tuple: [integer, integer, integer] + +parameters: + run_id: "" + image_size: [28, 28, 1] + model_name: mnist_classifier + model_version: -1 + fgm_job_id: + adv_tar_name: testing_adversarial_fgm.tar.gz + adv_data_dir: adv_testing + seed: -1 + +tasks: + load_artifacts_for_job: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job + inputs: + - job_id: string + - name: extract_files + type: list_path_string + required: false + load_dataset: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset + inputs: + - name: ep_seed + type: integer + required: false + - name: data_dir + type: string + required: false + - name: subsets + type: list_str_null + required: false + - name: image_size + type: image_size + required: false + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: shuffle + type: boolean + required: false + outputs: + - training: directory_iterator + - validation: directory_iterator + - testing: directory_iterator + + load_model: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_model + inputs: + - name: model_name + type: string + required: false + - name: model_version + type: integer + required: false + - name: imagenet_preprocessing + type: boolean + required: false + - name: art + type: boolean + required: false + - name: classifier_kwargs + type: kwargs + required: false + outputs: + classifier: classifier + + compute_metrics: + plugin: dioptra_custom.fgm_mnist_demo.plugins.compute_metrics + inputs: + - classifier: classifier + - dataset: directory_iterator + +graph: + load: + load_artifacts_for_job: + job_id: $fgm_job_id + extract_files: [$adv_tar_name] + + dataset: + load_dataset: + ep_seed: $seed + data_dir: $adv_data_dir + subsets: [testing] + image_size: $image_size + + model: + load_model: + model_name: $model_name + model_version: $model_version + + metrics: + compute_metrics: + classifier: $model + dataset: $dataset.testing + dependencies: + - model \ No newline at end of file diff --git a/examples/mnist-classifier-demo/src/train.yml b/examples/mnist-classifier-demo/src/train.yml new file mode 100644 index 000000000..2bd1fd161 --- /dev/null +++ b/examples/mnist-classifier-demo/src/train.yml @@ -0,0 +1,203 @@ +types: + name_parameters: + mapping: + name: string + parameters: + mapping: [string, any] + metrics_list: + list: name_parameters + callbacks_list: + list: name_parameters + model_list: + list: model_def + model_def: + mapping: [string, any] + artifact_list: + list: artifact + artifact: + mapping: [string, any] + directory_iterator: + image_size: + tuple: [integer, integer, integer] + classifier: + fit_kwargs: + mapping: [string, any] + fit_kwargs_null: + union: [fit_kwargs, "null"] + str_null: + union: [string, "null"] + list_str_null: + list: str_null + num_null: + union: [number, "null"] + kwargs: + mapping: [string, any] + +parameters: + seed: -1 + optimizer_name: Adam + learning_rate: 0.001 + training_dir: /dioptra/data/Mnist/training + testing_dir: /dioptra/data/Mnist/testing + image_size: [28, 28, 1] + validation_split: 0.2 + batch_size: 32 + model_architecture: le_net + 
epochs: 30 + register_model_name: "mnist_classifier" + +tasks: + load_dataset: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset + inputs: + - name: ep_seed + type: integer + required: false + - name: data_dir + type: string + required: false + - name: subsets + type: list_str_null + required: false + - name: image_size + type: image_size + required: false + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: shuffle + type: boolean + required: false + outputs: + - training: directory_iterator + - validation: directory_iterator + - testing: directory_iterator + + create_model: + plugin: dioptra_custom.fgm_mnist_demo.plugins.create_model + inputs: + - name: dataset + type: directory_iterator + required: false + - name: model_architecture + type: string + required: false + - name: input_shape + type: image_size + required: false + - name: loss + type: string + required: false + - name: learning_rate + type: number + required: false + - name: optimizer + type: string + required: false + - name: metrics_list + type: metrics_list + outputs: + classifier: classifier + + train: + plugin: dioptra_custom.fgm_mnist_demo.plugins.train + inputs: + - estimator: classifier + - x: any + - name: y + type: any + required: false + - name: callbacks_list + type: callbacks_list + required: false + - name: fit_kwargs + type: fit_kwargs_null + required: false + outputs: + classifier: classifier + + compute_metrics: + plugin: dioptra_custom.fgm_mnist_demo.plugins.compute_metrics + inputs: + - classifier: classifier + - dataset: directory_iterator + + save_artifacts_and_models: + plugin: dioptra_custom.fgm_mnist_demo.plugins.save_artifacts_and_models + inputs: + - name: artifacts + type: artifact_list + required: false + - name: models + type: model_list + required: false + +graph: + dataset: + load_dataset: + ep_seed: $seed + data_dir: $training_dir + subsets: [training, validation, testing] + image_size: $image_size + validation_split: $validation_split + batch_size: $batch_size + + model: + create_model: + dataset: $dataset.training + model_architecture: $model_architecture + input_shape: $image_size + learning_rate: $learning_rate + optimizer: $optimizer_name + metrics_list: + - name: CategoricalAccuracy + parameters: { name: accuracy } + - name: Precision + parameters: { name: precision } + - name: Recall + parameters: { name: recall } + - name: AUC + parameters: { name: auc } + dependencies: + - dataset + + trained_model: + train: + estimator: $model + x: $dataset.training + callbacks_list: + - name: EarlyStopping + parameters: + monitor: val_loss + min_delta: .01 + patience: 5 + restore_best_weights: true + fit_kwargs: + nb_epochs: $epochs + validation_data: $dataset.validation + verbose: 2 + dependencies: + - model + + metrics: + compute_metrics: + classifier: $trained_model + dataset: $dataset.testing + dependencies: + - trained_model + + save: + save_artifacts_and_models: + models: + - name: $register_model_name + model: $trained_model + \ No newline at end of file diff --git a/examples/scripts/client.py b/examples/scripts/client.py index e3fb4e450..3aba9e9fa 100644 --- a/examples/scripts/client.py +++ b/examples/scripts/client.py @@ -1,221 +1,1023 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States 
Department of Commerce. This software was developed in part by employees of
-# NIST and in part by NIST contractors. Copyright in portions of this software that
-# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
-# to Title 17 United States Code Section 105, works of NIST employees are not
-# subject to copyright protection in the United States. However, NIST may hold
-# international copyright in software created by its employees and domestic
-# copyright (or licensing rights) in portions of software that were assigned or
-# licensed to NIST. To the extent that NIST holds copyright in this software, it is
-# being made available under the Creative Commons Attribution 4.0 International
-# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
-# of the software developed or licensed by NIST.
-#
-# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
-# https://creativecommons.org/licenses/by/4.0/legalcode
 from __future__ import annotations
 
 import os
-from pathlib import Path
 from posixpath import join as urljoin
-from typing import Any
 from urllib.parse import urlparse, urlunparse
 
 import requests
+import structlog
+from structlog.stdlib import BoundLogger
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+class APIConnectionError(Exception):
+    """Class for connection errors"""
+
+
+class StatusCodeError(Exception):
+    """Class for status code errors"""
+
+
+class JSONDecodeError(Exception):
+    """Class for JSON decode errors"""
+
+
+def create_data_dict(**kwargs):
+    return kwargs
+
+
+def debug_request(url, method, data=None):
+    LOGGER.debug("Request made.", url=url, method=method, data=data)
+
+
+def debug_response(json):
+    LOGGER.debug("Response received.", json=json)
+
+
+def get(session, endpoint, *features):
+    debug_request(urljoin(endpoint, *features), "GET")
+    return make_request(session, "get", endpoint, None, *features)
+
+
+def post(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "POST", data)
+    return make_request(session, "post", endpoint, data, *features)
+
+
+def delete(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "DELETE", data)
+    return make_request(session, "delete", endpoint, data, *features)
+
+
+def put(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "PUT", data)
+    return make_request(session, "put", endpoint, data, *features)
+
+
+def make_request(session, method_name, endpoint, data, *features):
+    url = urljoin(endpoint, *features)
+    method = getattr(session, method_name)
+    response = None  # stays None if the connection itself fails
+    try:
+        if data:
+            response = method(url, json=data)
+        else:
+            response = method(url)
+        if response.status_code != 200:
+            raise StatusCodeError()
+        json = response.json()
+    except (requests.ConnectionError, StatusCodeError, requests.JSONDecodeError) as e:
+        handle_error(session, url, method_name.upper(), data, response, e)
+    debug_response(json=json)
+    return json
+
+
+def handle_error(session, url, method, data, response, error):
+    if type(error) is requests.ConnectionError:
+        restapi = os.environ["DIOPTRA_RESTAPI_URI"]
+        message = (
+            f"Could not connect to the REST API. Is the server running at {restapi}?"
+        )
+        # response is None when the connection failed, so guard the .text access
+        LOGGER.error(
+            message,
+            url=url,
+            method=method,
+            data=data,
+            response=response.text if response is not None else None,
+        )
+        raise APIConnectionError(message)
+    if type(error) is StatusCodeError:
+        message = f"Error code {response.status_code} returned."
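+        # Log the failing request and response body, then raise a concise error.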
+ LOGGER.error(message, url=url, method=method, data=data, response=response.text) + raise StatusCodeError(message) + if type(error) is requests.JSONDecodeError: + message = "JSON response could not be decoded." + LOGGER.error(message, url=url, method=method, data=data, response=response.text) + raise JSONDecodeError(message) class DioptraClient(object): - def __init__(self, address: str | None = None, api_version: str = "v0") -> None: + def __init__(self, session=None, address=None, api_version="v1"): address = ( f"{address}/api/{api_version}" if address else f"{os.environ['DIOPTRA_RESTAPI_URI']}/api/{api_version}" ) - self._scheme, self._netloc, self._path, _, _, _ = urlparse(address) - @property - def experiment_endpoint(self) -> str: - return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "experiment/"), "", "", "") + self._session = session if session is not None else requests.Session() + self._users = UsersClient(session, "users", address) + self._auth = AuthClient(session, "auth", address) + self._queues = QueuesClient(session, "queues", address) + self._groups = GroupsClient(session, "groups", address) + self._tags = TagsClient(session, "tags", address) + self._plugins = PluginsClient(session, "plugins", address) + self._pluginParameterTypes = PluginParameterTypesClient( + session, "pluginParameterTypes", address ) + self._experiments = ExperimentsClient(session, "experiments", address) + self._jobs = JobsClient(session, "jobs", address) + self._entrypoints = EntrypointsClient(session, "entrypoints", address) + self._models = ModelsClient(session, "models", address) + self._artifacts = ArtifactsClient(session, "artifacts", address) + # models + # artifacts @property - def job_endpoint(self) -> str: - return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "job/"), "", "", "") - ) + def users(self): + return self.get_endpoint(self._users) @property - def task_plugin_endpoint(self) -> str: - return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "taskPlugin/"), "", "", "") - ) + def auth(self): + return self.get_endpoint(self._auth) @property - def task_plugin_builtins_endpoint(self) -> str: - return urlunparse( - ( - self._scheme, - self._netloc, - urljoin(self._path, "taskPlugin/dioptra_builtins"), - "", - "", - "", - ) - ) + def queues(self): + return self.get_endpoint(self._queues) @property - def task_plugin_custom_endpoint(self) -> str: - return urlunparse( - ( - self._scheme, - self._netloc, - urljoin(self._path, "taskPlugin/dioptra_custom"), - "", - "", - "", - ) - ) + def groups(self): + return self.get_endpoint(self._groups) + + @property + def tags(self): + return self.get_endpoint(self._tags) + + @property + def plugins(self): + return self.get_endpoint(self._plugins) + + @property + def pluginParameterTypes(self): + return self.get_endpoint(self._pluginParameterTypes) + + @property + def experiments(self): + return self.get_endpoint(self._experiments) + + @property + def jobs(self): + return self.get_endpoint(self._jobs) + + @property + def entrypoints(self): + return self.get_endpoint(self._entrypoints) + + @property + def models(self): + return self.get_endpoint(self._models) + + @property + def artifacts(self): + return self.get_endpoint(self._artifacts) + + def get_endpoint(self, ep): + ep.session = self._session + return ep + + +class HasTagsProvider(object): + def __init__(self, url, session): + self._tags = TagsProvider(url, session) + + @property + def tags(self): + return self.get_endpoint(self._tags) + + def 
get_endpoint(self, ep): + ep.session = self._session + return ep + + +class HasDraftsEndpoint(object): + def __init__(self, url, session, address, fields, put_fields=None): + self.draft_fields = fields + self.put_fields = put_fields if put_fields is not None else fields + self._drafts = DraftsEndpoint(url, self, session, "draft", address) + + @property + def drafts(self): + return self.get_endpoint(self._drafts) + + def get_endpoint(self, ep): + ep.session = self._session + return ep + + +class HasSubEndpointProvider(object): + def __init__(self, url): + self._url = url + + def idurl(self, ep_id): + return urljoin(self._url, ep_id) + + +class Endpoint(object): + def __init__(self, session, ep_name, address): + self._scheme, self._netloc, self._path, _, _, _ = urlparse(address) + self._ep_name = ep_name + self._session = session @property - def queue_endpoint(self) -> str: + def session(self): + return self._session + + @session.setter + def session(self, s): + self._session = s + + @property + def url(self): + return self.def_endpoint(self._ep_name) + + @property + def ep_name(self): + return self._ep_name + + def def_endpoint(self, name): + """creates base url for an endpoint by name""" return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "queue/"), "", "", "") + (self._scheme, self._netloc, urljoin(self._path, name + "/"), "", "", "") + ) + + def get_all(self, search=None, groupId=None, index=None, pageLength=None): + """gets all resources""" + return get(self.session, self.url, build_get_params(search=search, groupId=groupId, index=index, pageLength=pageLength)) + + +class SubEndpoint(Endpoint): + def __init__(self, parent, session, ep_name, address): + Endpoint.__init__(self, session, ep_name, address) + self._parent = parent # parent should extend HasSubEndpointProvider + + def suburl(self, ep_id): + return urljoin(self._parent.idurl(str(ep_id)), self.ep_name) + +def build_get_params(**kwargs): + kwargs = dict(filter(lambda item: item[1] != None, kwargs.items())) + if (kwargs != {}): + return '?' 
+ '&'.join([str(a[0]) + '=' + str(a[1]) for a in kwargs.items()]) + else: + return '' + +class UsersClient(Endpoint): + def create(self, username, email, password, confirm_password): + """creates a user""" + d = { + "username": username, + "email": email, + "password": password, + "confirmPassword": confirm_password, + } + return post(self.session, self.url, d) + + def get_by_id(self, user_id): + """get a user by id""" + return get(self.session, self.url, str(user_id)) + + def update_password_by_id( + self, user_id, old_password, new_password, confirm_new_password + ): + """change a user's password by id""" + d = { + "oldPassword": old_password, + "newPassword": new_password, + "confirmNewPassword": confirm_new_password, + } + return post(self.session, self.url, d, str(user_id), "password") + + def current(self): + """get the current user""" + return get(self.session, self.url, "current") + + def delete_current(self, password): + """delete the current user""" + d = {"password": password} + return delete(self.session, self.url, d, "current") + + def modify_current(self, username, email): + """modify the current user""" + d = {"username": username, "email": email} + return put(self.session, self.url, d, "current") + + def modify_current_password(self, old_password, new_password, confirm_new_password): + """modify the current user's password""" + d = { + "oldPassword": old_password, + "newPassword": new_password, + "confirmNewPassword": confirm_new_password, + } + return post(self.session, self.url, d, "current", "password") + + def failed_user_post(self): + """create a post request with an invalid schema, for testing""" + return post(self.session, self.url, {"a": "doesnotexist"}) + + def failed_user_get(self): + """create a get request to an invalid url, for testing""" + return get(self.session, self.url, "doesnotexist") + + +class AuthClient(Endpoint): + def login(self, username, password): + """login as the given user""" + d = {"username": username, "password": password} + return post(self.session, self.url, d, "login") + + def logout(self, everywhere): + """logout as the current user""" + d = {"everywhere": everywhere} + return post(self.session, self.url, d, "logout") + + +class GroupsClient(Endpoint): + def get_by_id(self, gid): + """get a group by id""" + return get(self.session, self.url, str(gid)) + + +class QueuesClient(Endpoint, HasDraftsEndpoint, HasSubEndpointProvider): + def __init__(self, session, ep_name, address): + Endpoint.__init__(self, session, ep_name, address) + HasDraftsEndpoint.__init__( + self, self.url, self.session, address, ["name", "description"] ) + HasSubEndpointProvider.__init__(self, self.url) + + def create(self, group, name, description): + """create a queue""" + d = {"group": group, "name": name, "description": description} + return post(self.session, self.url, d) + + def modify_by_id(self, queue_id, name, description): + """modify a queue by id""" + d = {"name": name, "description": description} + return put(self.session, self.url, d, str(queue_id)) + + def delete_by_id(self, queue_id): + """delete a queue by id""" + d = None + return delete(self.session, self.url, d, str(queue_id)) + + def get_by_id(self, queue_id): + """get a queue by id""" + return get(self.session, self.url, str(queue_id)) - def delete_custom_task_plugin(self, name: str): - plugin_name_query: str = urljoin(self.task_plugin_custom_endpoint, name) - return requests.delete(plugin_name_query).json() - def get_experiment_by_id(self, id: int): - experiment_id_query: str = 
urljoin(self.experiment_endpoint, str(id)) - return requests.get(experiment_id_query).json() +class TagsClient(Endpoint): - def get_experiment_by_name(self, name: str): - experiment_name_query: str = urljoin(self.experiment_endpoint, "name", name) - return requests.get(experiment_name_query).json() + def create(self, group, name): + d = {"name": name, "group": group} + return post(self.session, self.url, d) - def get_job_by_id(self, id: str): - job_id_query: str = urljoin(self.job_endpoint, id) - return requests.get(job_id_query).json() + def delete_by_id(self, tag_id): + d = None + return delete(self.session, self.url, d, str(tag_id)) - def get_queue_by_id(self, id: int): - queue_id_query: str = urljoin(self.queue_endpoint, str(id)) - return requests.get(queue_id_query).json() + def get_by_id(self, tag_id): + return get(self.session, self.url, str(tag_id)) - def get_queue_by_name(self, name: str): - queue_name_query: str = urljoin(self.queue_endpoint, "name", name) - return requests.get(queue_name_query).json() + def modify_by_id(self, tag_id, name): + d = {"name": name} + return put(self.session, self.url, d, str(tag_id)) - def get_builtin_task_plugin(self, name: str): - task_plugin_name_query: str = urljoin(self.task_plugin_builtins_endpoint, name) - return requests.get(task_plugin_name_query).json() + def get_resources_by_id(self, tag_id): + return get(self.session, self.url, str(tag_id), "resources") + + +class EntrypointsClient( + Endpoint, HasTagsProvider, HasDraftsEndpoint, HasSubEndpointProvider +): + def __init__(self, session, ep_name, address): + Endpoint.__init__(self, session, ep_name, address) + HasTagsProvider.__init__(self, self.url, self.session) + HasDraftsEndpoint.__init__( + self, + self.url, + self.session, + address, + ["name", "description", "taskGraph", "parameters", "queues", "plugins"], + ) + HasSubEndpointProvider.__init__(self, self.url) + + def create(self, group, name, description, taskGraph, parameters, queues, plugins): + d = { + "group": group, + "name": name, + "description": description, + "taskGraph": taskGraph, + "parameters": parameters, + "queues": queues, + "plugins": plugins, + } + return post(self.session, self.url, d) + + def modify_by_id( + self, entrypoint_id, name, description, taskGraph, parameters, queues + ): + d = { + "name": name, + "description": description, + "taskGraph": taskGraph, + "parameters": parameters, + "queues": queues, + } + return put(self.session, self.url, d, str(entrypoint_id)) - def get_custom_task_plugin(self, name: str): - task_plugin_name_query: str = urljoin(self.task_plugin_custom_endpoint, name) - return requests.get(task_plugin_name_query).json() + def get_by_id(self, entrypoint_id): + return get(self.session, self.url, str(entrypoint_id)) - def list_experiments(self) -> list[dict[str, Any]]: - return requests.get(self.experiment_endpoint).json() + def delete_by_id(self, entrypoint_id): + d = None + return delete(self.session, self.url, d, str(entrypoint_id)) - def list_jobs(self) -> list[dict[str, Any]]: - return requests.get(self.job_endpoint).json() + def get_plugins_by_entrypoint_id(self, entrypoint_id): + return get(self.session, self.url, str(entrypoint_id), "plugins") - def list_queues(self) -> list[dict[str, Any]]: - return requests.get(self.queue_endpoint).json() + def add_plugins_by_entrypoint_id(self, entrypoint_id, plugins): + d = {"plugins": plugins} + return post(self.session, self.url, d, str(entrypoint_id), "plugins") - def list_all_task_plugins(self) -> list[dict[str, Any]]: - return 
requests.get(self.task_plugin_endpoint).json() + def get_plugins_by_entrypoint_id_plugin_id(self, entrypoint_id, plugin_id): + return get( + self.session, self.url, str(entrypoint_id), "plugins", str(plugin_id) + ) - def list_builtin_task_plugins(self) -> list[dict[str, Any]]: - return requests.get(self.task_plugin_builtins_endpoint).json() + def delete_plugins_by_entrypoint_id_plugin_id(self, entrypoint_id, plugin_id): + d = None + return delete( + self.session, self.url, d, str(entrypoint_id), "plugins", str(plugin_id) + ) - def list_custom_task_plugins(self) -> list[dict[str, Any]]: - return requests.get(self.task_plugin_custom_endpoint).json() + def modify_queues_by_entrypoint_id(self, entrypoint_id, ids): + d = {"ids": ids} + return put(self.session, self.url, d, str(entrypoint_id), "queues") - def lock_queue(self, name: str): - queue_name_query: str = urljoin(self.queue_endpoint, "name", name, "lock") - return requests.put(queue_name_query).json() + def add_queues_by_entrypoint_id(self, entrypoint_id, ids): + d = {"ids": ids} + return post(self.session, self.url, d, str(entrypoint_id), "queues") - def unlock_queue(self, name: str): - queue_name_query: str = urljoin(self.queue_endpoint, "name", name, "lock") - return requests.delete(queue_name_query).json() + def get_queues_by_entrypoint_id(self, entrypoint_id): + return get(self.session, self.url, str(entrypoint_id), "queues") - def register_experiment(self, name: str) -> dict[str, Any]: - experiment_registration_form = {"name": name} + def delete_queues_by_entrypoint_id(self, entrypoint_id): + d = None + return delete(self.session, self.url, d, str(entrypoint_id), "queues") - response = requests.post( - self.experiment_endpoint, - json=experiment_registration_form, + def delete_queues_by_entrypoint_id_queue_id(self, entrypoint_id, queue_id): + d = None + return delete( + self.session, self.url, d, str(entrypoint_id), "queues", str(queue_id) ) - return response.json() + def get_snapshots_by_entrypoint_id(self, entrypoint_id): + return get(self.session, self.url, str(entrypoint_id), "snapshots") + + def get_snapshots_by_entrypoint_id_snapshot_id(self, entrypoint_id, snapshot_id): + return get( + self.session, self.url, str(entrypoint_id), "snapshots", str(snapshot_id) + ) - def register_queue(self, name: str = "tensorflow_cpu") -> dict[str, Any]: - queue_registration_form = {"name": name} - response = requests.post( - self.queue_endpoint, - json=queue_registration_form, +class ExperimentsClient( + Endpoint, HasTagsProvider, HasDraftsEndpoint, HasSubEndpointProvider +): + def __init__(self, session, ep_name, address): + Endpoint.__init__(self, session, ep_name, address) + HasTagsProvider.__init__(self, self.url, self.session) + HasDraftsEndpoint.__init__( + self, + self.url, + self.session, + address, + ["name", "description", "entrypoints"], + ) + HasSubEndpointProvider.__init__(self, self.url) + + def create(self, group, name, description, entrypoints): + d = { + "group": group, + "name": name, + "description": description, + "entrypoints": entrypoints, + } + return post(self.session, self.url, d) + + def get_drafts(self): + return get(self.session, self.url, "drafts") + + def get_by_id(self, experiment_id): + return get(self.session, self.url, str(experiment_id)) + + def modify_by_id(self, experiment_id, name, description, entrypoints): + d = {"name": name, "description": description, "entrypoints": entrypoints} + return put(self.session, self.url, d, str(experiment_id)) + + def delete_by_id(self, experiment_id): + d = None + return 
delete(self.session, self.url, d, str(experiment_id)) + + def get_entrypoints_by_experiment_id(self, experiment_id): + return get(self.session, self.url, str(experiment_id), "entrypoints") + + def modify_entrypoints_by_experiment_id(self, experiment_id, ids): + d = {"ids": ids} + return put(self.session, self.url, d, str(experiment_id), "entrypoints") + + def add_entrypoints_by_experiment_id(self, experiment_id, ids): + d = {"ids": ids} + return post(self.session, self.url, d, str(experiment_id), "entrypoints") + + def delete_entrypoints_by_experiment_id(self, experiment_id): + d = None + return delete(self.session, self.url, d, str(experiment_id), "entrypoints") + + def delete_entrypoints_by_experiment_id_entrypoint_id( + self, experiment_id, entrypoint_id + ): + d = None + return delete( + self.session, + self.url, + d, + str(experiment_id), + "entrypoints", + str(entrypoint_id), ) - return response.json() + def get_jobs_by_experiment_id(self, experiment_id): + return get(self.session, self.url, str(experiment_id), "jobs") - def submit_job( - self, - workflows_file: str | Path, - experiment_name: str, - entry_point: str, - entry_point_kwargs: str | None = None, - depends_on: str | None = None, - queue: str = "tensorflow_cpu", - timeout: str = "24h", - ) -> dict[str, Any]: - job_form: dict[str, Any] = { - "experimentName": experiment_name, + def create_jobs_by_experiment_id( + self, experiment_id, description, queue, entrypoint, values, timeout + ): + d = { + "description": description, "queue": queue, + "entrypoint": entrypoint, + "values": values, "timeout": timeout, - "entryPoint": entry_point, } + return post(self.session, self.url, d, str(experiment_id), "jobs") + + def get_jobs_by_experiment_id_job_id(self, experiment_id, job_id): + return get(self.session, self.url, str(experiment_id), "jobs", str(job_id)) + + def delete_jobs_by_experiment_id_job_id(self, experiment_id, job_id): + d = None + return delete( + self.session, self.url, d, str(experiment_id), "jobs", str(job_id) + ) + + def get_jobs_status_by_experiment_id_job_id(self, experiment_id, job_id): + return get( + self.session, self.url, str(experiment_id), "jobs", str(job_id), "status" + ) + + def modify_jobs_status_by_experiment_id_job_id(self, experiment_id, job_id, status): + d = {"status": status} + return put( + self.session, self.url, d, str(experiment_id), "jobs", str(job_id), "status" + ) + + def get_snapshots_by_experiment_id(self, experiment_id): + return get(self.session, self.url, str(experiment_id), "snapshots") + + def get_snapshots_by_experiment_id_snapshot_id(self, experiment_id, snapshot_id): + return get( + self.session, self.url, str(experiment_id), "snapshots", str(snapshot_id) + ) + + +class JobsClient(Endpoint, HasTagsProvider): + def __init__(self, session, ep_name, address): + Endpoint.__init__(self, session, ep_name, address) + HasTagsProvider.__init__(self, self.url, self.session) + + def delete_by_id(self, job_id): + d = None + return delete(self.session, self.url, d, str(job_id)) + + def get_by_id(self, job_id): + return get(self.session, self.url, str(job_id)) + + def get_mlflow_run_id(self, job_id): + return get(self.session, self.url, str(job_id), "mlflowRun") + + def get_snapshots_by_job_id(self, job_id): + return get(self.session, self.url, str(job_id), "snapshots") + + def get_snapshots_by_job_id_snapshot_id(self, job_id, snapshot_id): + return get(self.session, self.url, str(job_id), "snapshots", str(snapshot_id)) + + def get_status_by_job_id(self, job_id): + return get(self.session, 
self.url, str(job_id), "status") + + +class PluginsClient( + Endpoint, HasDraftsEndpoint, HasSubEndpointProvider, HasTagsProvider +): + def __init__(self, session, ep_name, address): + Endpoint.__init__(self, session, ep_name, address) + HasTagsProvider.__init__(self, self.url, self.session) + HasDraftsEndpoint.__init__( + self, self.url, self.session, address, ["name", "description"] + ) + HasSubEndpointProvider.__init__(self, self.url) + self._files = PluginFilesClient(self, session, "files", address) + + @property + def files(self): + self._files.session = self.session + return self._files + + def create(self, group, name, description): + d = {"group": group, "name": name, "description": description} + return post(self.session, self.url, d) + + def get_by_id(self, plugin_id): + return get(self.session, self.url, str(plugin_id)) + + def modify_by_id(self, plugin_id, name, description): + d = {"name": name, "description": description} + return put(self.session, self.url, d, str(plugin_id)) + + def delete_by_id(self, plugin_id): + d = None + return delete(self.session, self.url, d, str(plugin_id)) + + def get_snapshots_by_plugin_id(self, plugin_id): + return get(self.session, self.url, str(plugin_id), "snapshots") + + def get_snapshot_by_plugin_id_snapshot_id(self, plugin_id, snapshot_id): + return get( + self.session, self.url, str(plugin_id), "snapshots", str(snapshot_id) + ) + + +class PluginFilesClient(SubEndpoint): + def __init__(self, parent, session, ep_name, address): + SubEndpoint.__init__(self, parent, session, ep_name, address) + # HasTagsProvider.__init__(self, self.url, self.session) + # HasDraftsEndpoint.__init__(self, self.url, self.session, address, + # ["filename", "description"] + # ) + # HasSubEndpointProvider.__init__(self, self.url) + + def get_files_by_plugin_id(self, plugin_id, search=None, groupId=None, index=None, pageLength=None): + return get(self.session, self.suburl(plugin_id) + build_get_params(search=search, groupId=groupId, index=index, pageLength=pageLength)) + + def create_files_by_plugin_id( + self, plugin_id, filename, contents, description, plugins + ): + d = { + "filename": filename, + "contents": contents, + "description": description, + "tasks": plugins, + } + return post(self.session, self.suburl(plugin_id), d) + + def delete_files_by_plugin_id(self, plugin_id): + d = None + return delete(self.session, self.suburl(plugin_id), d) + + def get_files_drafts_by_plugin_id(self, plugin_id): + return get(self.session, self.suburl(plugin_id), "drafts") + + def create_files_drafts_by_plugin_id( + self, plugin_id, filename, contents, description, plugins + ): + d = { + "filename": filename, + "contents": contents, + "description": description, + "tasks": plugins, + } + return post(self.session, self.suburl(plugin_id), d, "drafts") + + def get_files_drafts_by_plugin_id_draft_id(self, plugin_id, drafts_id): + return get(self.session, self.suburl(plugin_id), "drafts", str(drafts_id)) + + def modify_files_drafts_by_plugin_id_draft_id( + self, plugin_id, drafts_id, filename, contents, description, plugins + ): + d = { + "filename": filename, + "contents": contents, + "description": description, + "tasks": plugins, + } + return put(self.session, self.suburl(plugin_id), d, "drafts", str(drafts_id)) + + def delete_files_drafts_by_plugin_id_draft_id(self, plugin_id, drafts_id): + d = None + return delete(self.session, self.suburl(plugin_id), d, "drafts", str(drafts_id)) + + def get_files_by_plugin_id_file_id(self, plugin_id, file_id): + return get(self.session, 
self.suburl(plugin_id), str(file_id)) + + def modify_files_by_plugin_id_file_id( + self, plugin_id, file_id, filename, contents, description, plugins + ): + d = { + "filename": filename, + "contents": contents, + "description": description, + "tasks": plugins, + } + return put(self.session, self.suburl(plugin_id), d, str(file_id)) + + def delete_files_by_plugin_id_file_id(self, plugin_id, file_id): + d = None + return delete(self.session, self.suburl(plugin_id), d, str(file_id)) + + def get_files_draft_by_plugin_id_file_id(self, plugin_id, file_id): + return get(self.session, self.suburl(plugin_id), str(file_id), "draft") + + def modify_files_draft_by_plugin_id_file_id( + self, plugin_id, file_id, filename, contents, description, plugins + ): + d = { + "filename": filename, + "contents": contents, + "description": description, + "tasks": plugins, + } + return put(self.session, self.suburl(plugin_id), d, str(file_id), "draft") + + def delete_files_draft_by_plugin_id_file_id(self, plugin_id, file_id): + d = None + return delete(self.session, self.suburl(plugin_id), d, str(file_id), "draft") + + def create_files_draft_by_plugin_id_file_id( + self, plugin_id, file_id, filename, contents, description, plugins + ): + d = { + "filename": filename, + "contents": contents, + "description": description, + "tasks": plugins, + } + return post(self.session, self.suburl(plugin_id), d, str(file_id), "draft") + + def get_snapshots_by_plugin_id_file_id(self, plugin_id, file_id): + return get(self.session, self.suburl(plugin_id), str(file_id), "snapshots") + + def get_snapshots_by_plugin_id_file_id_snapshot_id( + self, plugin_id, file_id, snapshot_id + ): + return get( + self.session, + self.suburl(plugin_id), + str(file_id), + "snapshots", + str(snapshot_id), + ) + + def get_tags_by_plugin_id_file_id(self, plugin_id, file_id): + return get(self.session, self.suburl(plugin_id), str(file_id), "tags") + + def modify_tags_by_plugin_id_file_id(self, plugin_id, file_id, ids): + d = {"ids": ids} + return put(self.session, self.suburl(plugin_id), d, str(file_id), "tags") - if entry_point_kwargs is not None: - job_form["entryPointKwargs"] = entry_point_kwargs - - if depends_on is not None: - job_form["dependsOn"] = depends_on - - workflows_file = Path(workflows_file) - - with workflows_file.open("rb") as f: - job_files = {"workflow": (workflows_file.name, f)} - response = requests.post( - self.job_endpoint, - data=job_form, - files=job_files, - ) - - return response.json() - - def upload_custom_plugin_package( - self, - custom_plugin_name: str, - custom_plugin_file: str | Path, - collection: str = "dioptra_custom", - ) -> dict[str, Any]: - plugin_upload_form = { - "taskPluginName": custom_plugin_name, - "collection": collection, + def delete_tags_by_plugin_id_file_id(self, plugin_id, file_id): + d = None + return delete(self.session, self.suburl(plugin_id), d, str(file_id), "tags") + + def add_tags_by_plugin_id_file_id(self, plugin_id, file_id, ids): + d = {"ids": ids} + return post(self.session, self.suburl(plugin_id), d, str(file_id), "tags") + + def delete_tags_by_plugin_id_file_id_tag_id(self, plugin_id, file_id, tag_id): + d = None + return delete( + self.session, self.suburl(plugin_id), d, str(file_id), "tags", str(tag_id) + ) + + +class PluginParameterTypesClient(Endpoint): + + def create(self, group, name, description, structure): + d = { + "group": group, + "name": name, + "description": description, + "structure": structure, } + return post(self.session, self.url, d) + + def get_by_id(self, type_id): + 
return get(self.session, self.url, str(type_id)) + + def modify_by_id(self, type_id, name, description, structure): + d = {"name": name, "description": description, "structure": structure} + return put(self.session, self.url, d, str(type_id)) + + def delete_by_id(self, type_id): + d = None + return delete(self.session, self.url, d, str(type_id)) + + +class PluginTask(object): + def __init__(self, name, inputs, outputs, client): + self.name = name + self.inputs = inputs # expects [(name1, type1), (name2, type2) ...] + self.outputs = outputs # expects [(name1, type1), (name2, type2) ...] + self.client = client + + def convert_params_to_ids(self, mappings): + """this converts parameters to registered ids using a mapping + from register_unregistered_types""" + return [(i[0], mappings[i[1]]) for i in self.inputs], [ + (o[0], mappings[o[1]]) for o in self.outputs + ] + + def register_unregistered_types(self, group=1): + """checks all the types in inputs/outputs and register things that + aren't registered""" + registered_types = ( + self.client.pluginParameterTypes.get_all() + ) # get all registered types + types_used_in_plugin = set( + [m[1] for m in self.inputs] + [m[1] for m in self.outputs] + ) # get all types for this plugin + types_to_id = {} + for registered in registered_types[ + "data" + ]: # add registered types to our dictionary + types_to_id[str(registered["name"])] = str(registered["id"]) + for used in types_used_in_plugin: + used = str(used) + if used not in types_to_id: # not yet registered, so register it + response = self.client.pluginParameterTypes.create( + group, used, used + " plugin parameter", structure={} + ) + types_to_id[used] = str(response["id"]) + return types_to_id # mapping of types to ids + + def as_dict(self, mappings=None): + """convert it to a dict to be sent to the RESTAPI""" + if mappings is None: + mappings = self.register_unregistered_types() + ins, outs = self.convert_params_to_ids(mappings) + return { + "name": self.name, + "inputParams": [ + {"name": param[0], "parameterType": param[1]} for param in ins + ], + "outputParams": [ + {"name": param[0], "parameterType": param[1]} for param in outs + ], + } + - custom_plugin_file = Path(custom_plugin_file) +class ArtifactsClient(Endpoint): - with custom_plugin_file.open("rb") as f: - custom_plugin_file = {"taskPluginFile": (custom_plugin_file.name, f)} - response = requests.post( - self.task_plugin_endpoint, - data=plugin_upload_form, - files=custom_plugin_file, - ) + def create(self, group, description, job, uri): + d = {"group": group, "description": description, "job": job, "uri": uri} + return post(self.session, self.url, d) - return response.json() + def get_by_id(self, artifact_id): + return get(self.session, self.url, str(artifact_id)) + + def modify_by_id(self, artifact_id, description): + d = {"description": description} + return put(self.session, self.url, d, str(artifact_id)) + + def get_snapshots(self, artifact_id): + return get(self.session, self.url, str(artifact_id), "snapshots") + + def get_snapshots_by_artifact_id_snapshot_id(self, artifact_id, snapshot_id): + return get( + self.session, self.url, str(artifact_id), "snapshots", str(snapshot_id) + ) + + +class ModelsClient( + Endpoint, HasTagsProvider, HasDraftsEndpoint, HasSubEndpointProvider +): + def __init__(self, session, ep_name, address): + Endpoint.__init__(self, session, ep_name, address) + HasSubEndpointProvider.__init__(self, self.url) + HasTagsProvider.__init__(self, self.url, self.session) + HasDraftsEndpoint.__init__( + self, 
self.url, self.session, address, ["name", "description"] + ) + + def create(self, group, name, description): + d = {"group": group, "name": name, "description": description} + return post(self.session, self.url, d) + + def get_by_id(self, model_id): + return get(self.session, self.url, str(model_id)) + + def modify_by_id(self, model_id, name, description): + d = {"name": name, "description": description} + return put(self.session, self.url, d, str(model_id)) + + def delete_by_id(self, model_id): + d = None + return delete(self.session, self.url, d, str(model_id)) + + def get_snapshots_by_model_id(self, model_id): + return get(self.session, self.url, str(model_id), "snapshots") + + def get_snapshot_by_plugin_id_model_id(self, model_id, snapshot_id): + return get(self.session, self.url, str(model_id), "snapshots", str(snapshot_id)) + + def get_versions_by_model_id(self, model_id): + return get(self.session, self.url, str(model_id), "versions") + + def create_version_by_model_id(self, model_id, description, artifact): + d = {"description": description, "artifact": artifact} + return post(self.session, self.url, d, str(model_id), "versions") + + def modify_version_by_model_id_version_id(self, model_id, version_id, description): + d = {"description": description} + return put( + self.session, self.url, d, str(model_id), "versions", str(version_id) + ) + + def get_version_by_model_id_version_id(self, model_id, version_id): + return get(self.session, self.url, str(model_id), "versions", str(version_id)) + + +class DraftsEndpoint(SubEndpoint): + def __init__(self, base_url, parent, session, ep_name, address): + SubEndpoint.__init__(self, parent, session, ep_name, address) + self.base_url = base_url + self.fields = parent.draft_fields # array of field names + self.put_fields = parent.put_fields # used when PUT method differs from create + + @property + def drafts_url(self): + return urljoin(self.base_url, "drafts") + + # /something/id/draft + + def create_draft_for_resource( + self, parent_id, *fields + ): # TODO: what to do about these parameters? 
they can be different
+        d = {}
+        for f in zip(self.fields, fields):
+            d[f[0]] = f[1]
+        return post(self.session, self.suburl(parent_id), d)
+
+    def get_draft_for_resource(self, parent_id):
+        return get(self.session, self.suburl(parent_id))
+
+    def modify_draft_for_resource(self, parent_id, *fields):
+        d = {}
+        for f in zip(self.put_fields, fields):
+            d[f[0]] = f[1]
+        return put(self.session, self.suburl(parent_id), d)
+
+    def delete_draft_for_resource(self, parent_id):
+        d = None
+        return delete(self.session, self.suburl(parent_id), d)
+
+    # /something/drafts/
+
+    def get_all(self, draftType=None, groupId=None, index=None, pageLength=None):
+        """Gets all drafts."""
+        return get(
+            self.session,
+            self.url,
+            build_get_params(
+                draftType=draftType, groupId=groupId, index=index, pageLength=pageLength
+            ),
+        )
+
+    def create(self, group_id, *fields):
+        d = {"group": group_id}
+        for f in zip(self.fields, fields):
+            d[f[0]] = f[1]
+        return post(self.session, self.drafts_url, d)
+
+    def modify_by_draft_id(self, draft_id, *fields):
+        d = {}
+        for f in zip(self.put_fields, fields):
+            d[f[0]] = f[1]
+        return put(self.session, self.drafts_url, d, str(draft_id))
+
+    def delete_by_draft_id(self, draft_id):
+        d = None
+        return delete(self.session, self.drafts_url, d, str(draft_id))
+
+    def get_by_draft_id(self, draft_id):
+        return get(self.session, self.drafts_url, str(draft_id))
+
+
+class TagsProvider(object):
+    def __init__(self, base_url, session):
+        self.url = base_url
+        self.session = session
+
+    def get(self, parent_id):
+        return get(self.session, self.url, str(parent_id), "tags")
+
+    def modify(self, parent_id, ids):
+        d = {"ids": ids}
+        return put(self.session, self.url, d, str(parent_id), "tags")
+
+    def delete_all(self, parent_id):
+        d = None
+        return delete(self.session, self.url, d, str(parent_id), "tags")
+
+    def add(self, parent_id, ids):
+        d = {"ids": ids}
+        return post(self.session, self.url, d, str(parent_id), "tags")
+
+    def delete(self, parent_id, tag_id):
+        d = None
+        return delete(self.session, self.url, d, str(parent_id), "tags", str(tag_id))
\ No newline at end of file
diff --git a/examples/scripts/setup.py b/examples/scripts/setup.py
new file mode 100644
index 000000000..c3e4ad1fd
--- /dev/null
+++ b/examples/scripts/setup.py
@@ -0,0 +1,251 @@
+import yaml
+from pathlib import Path
+
+basic_types = ['integer', 'string', 'number', 'any', 'boolean', 'null']
+
+def create_or_get_experiment(client, group, name, description, entrypoints):
+    found = None
+    for exp in client.experiments.get_all(search=name, pageLength=100000)['data']:
+        if exp['name'] == name:
+            found = exp
+    if found is not None:
+        client.experiments.add_entrypoints_by_experiment_id(found['id'], entrypoints)
+        return found
+    else:
+        return client.experiments.create(group, name, description, entrypoints)
+
+def create_or_get_entrypoints(client, group, name, description, taskGraph, parameters, queues, plugins):
+    found = None
+    for entrypoint in client.entrypoints.get_all(search=name, pageLength=100000)['data']:
+        if entrypoint['name'] == name:
+            found = entrypoint
+    if found is not None:
+        client.entrypoints.modify_by_id(found['id'], name, description, taskGraph, parameters, queues)
+        client.entrypoints.add_plugins_by_entrypoint_id(found['id'], plugins)
+        return found
+    else:
+        return client.entrypoints.create(group, name, description, taskGraph, parameters, queues, plugins)
+
+def create_or_get_plugin_type(client, group, name, description, structure):
+    ret = None
+    for pt in client.pluginParameterTypes.get_all(pageLength=100000)['data']:
+        if pt['name'] == name:
+            ret = pt
+    if ret is None:
+        ret = client.pluginParameterTypes.create(group, name, description, structure)
+    return ret
+
+def find_plugin_type(client, name, types):
+    for t in types.keys():
+        if t == name:
+            return create_or_get_plugin_type(client, 1, name, name, types[t])['id']
+    for t in basic_types:
+        if t == name:
+            return create_or_get_plugin_type(client, 1, name, 'primitive', {})['id']
+    raise ValueError(f"Couldn't find type {name} in types definition.")
+
+def create_or_get_queue(client, group, name, description):
+    ret = None
+    for queue in client.queues.get_all(pageLength=100000)['data']:
+        if queue['name'] == name:
+            ret = queue
+    if ret is None:
+        ret = client.queues.create(group, name, description)
+    return ret
+
+def plugin_to_py(plugin):
+    return '../task-plugins/' + '/'.join(plugin.split('.')[:-1]) + '.py'
+
+def create_inputParam_object(client, inputs, types):
+    ret = []
+    for inp in inputs:
+        if 'name' in inp:
+            inp_name = inp['name']
+            inp_type = inp['type']
+        else:
+            inp_name = list(inp.keys())[0]
+            inp_type = inp[inp_name]
+        if 'required' in inp:
+            inp_req = inp['required']
+        else:
+            inp_req = True
+        inp_type = find_plugin_type(client, inp_type, types)
+        ret += [{
+            'name': inp_name,
+            'parameterType': inp_type,
+            'required': inp_req
+        }]
+    return ret
+
+def create_outputParam_object(client, outputs, types):
+    ret = []
+    for outp in outputs:
+        if isinstance(outp, dict):
+            outp_name = list(outp.keys())[0]
+            outp_type = outp[outp_name]
+        else:
+            outp_name = outp
+            outp_type = outputs[outp_name]
+        outp_type = find_plugin_type(client, outp_type, types)
+        ret += [{
+            'name': outp_name,
+            'parameterType': outp_type,
+        }]
+    return ret
+
+def read_yaml(filename):
+    ret = None
+    with open(filename) as stream:
+        try:
+            ret = yaml.safe_load(stream)
+        except yaml.YAMLError as exc:
+            print(exc)
+    return ret
+
+def register_basic_types(client, declared):
+    for q in basic_types:
+        type_def = create_or_get_plugin_type(client, 1, q, 'primitive', {})
+    for q in declared:
+        type_def = create_or_get_plugin_type(client, 1, q, 'declared', declared[q])
+
+def get_plugins_to_register(client, yaml_file, plugins_to_upload=None):
+    plugins_to_upload = {} if plugins_to_upload is None else plugins_to_upload
+    spec = read_yaml(yaml_file)
+    plugins = spec['tasks']
+    types = spec['types']
+
+    register_basic_types(client, types)
+    for plugin in plugins:
+        name = plugin
+        definition = plugins[plugin]
+        python_file = plugin_to_py(definition['plugin'])
+        upload = {}
+        upload['name'] = name
+        if 'inputs' in definition:
+            inputs = definition['inputs']
+            upload['inputParams'] = create_inputParam_object(client, inputs, types)
+        else:
+            upload['inputParams'] = []
+        if 'outputs' in definition:
+            outputs = definition['outputs']
+            upload['outputParams'] = create_outputParam_object(client, outputs, types)
+        else:
+            upload['outputParams'] = []
+        if python_file in plugins_to_upload:
+            plugins_to_upload[python_file] += [upload]
+        else:
+            plugins_to_upload[python_file] = [upload]
+    return plugins_to_upload
+
+def create_or_get_plugin(client, group, name, description):
+    ret = None
+    for plugin in client.plugins.get_all(search=name, pageLength=100000)['data']:
+        if plugin['name'] == name:
+            ret = plugin
+    if ret is None:
+        ret = client.plugins.create(group, name, description)
+    return ret
+
+def create_or_modify_plugin_file(client, plugin_id, filename, contents, description, tasks):
+    found = None
+    for plugin_file in client.plugins.files.get_files_by_plugin_id(plugin_id, pageLength=100000)['data']:
+        if plugin_file['filename'] == filename:
+            found = plugin_file
+    if found is not None:
+        return client.plugins.files.modify_files_by_plugin_id_file_id(plugin_id, found['id'], filename, contents, description, tasks)
+    else:
+        return client.plugins.files.create_files_by_plugin_id(plugin_id, filename, contents, description, tasks)
+
+def register_plugins(client, group, plugins_to_upload):
+    plugins = []
+    for plugin_file in plugins_to_upload.keys():
+        plugin_path = Path(plugin_file)
+        contents = plugin_path.read_text().replace("\r", '')
+        tasks = plugins_to_upload[plugin_file]
+        filename = plugin_path.name
+        description = 'custom plugin for ' + filename
+        plugin_id = create_or_get_plugin(client, group, plugin_path.parent.name, description)['id']
+        plugins += [plugin_id]
+        uploaded_file = create_or_modify_plugin_file(client, plugin_id, filename, contents, description, tasks)
+    return list(set(plugins))
+
+def create_parameters_object(client, params):
+    ret = []
+    type_map = {'int': 'integer', 'float': 'float', 'string': 'string', 'list': 'list', 'bool': 'boolean'}
+    for p in params:
+        if type(params[p]).__name__ in type_map.keys():
+            paramType = type_map[type(params[p]).__name__]
+            # paramType = 'string'  # TODO: remove if backend can handle types correctly
+            defaultValue = str(params[p])
+        else:
+            defaultValue = str(params[p])
+            paramType = 'string'
+        name = p
+        param_obj = {
+            'name': name,
+            'defaultValue': str(defaultValue),
+            'parameterType': paramType
+        }
+        ret += [param_obj]
+    return ret
+
+def get_graph_for_upload(yaml_text):
+    i = 0
+    for line in yaml_text:
+        if line.startswith("graph:"):
+            break
+        i += 1
+    return ''.join(yaml_text[i+1:])
+
+def get_parameters_for_upload(yaml_text):
+    i = 0
+    start = 0
+    for line in yaml_text:
+        if line.startswith("parameters:"):
+            start = i
+        if line.startswith("tasks:"):
+            break
+        i += 1
+    return yaml_text[start:i+1]
+
+def register_entrypoint(client, group, name, description, queues, plugins, yaml_file):
+    spec = read_yaml(yaml_file)
+    parameters = spec['parameters']
+
+    with open(yaml_file, 'r') as f:
+        lines = f.readlines()
+    task_graph = get_graph_for_upload(lines).replace('\r', '')
+
+    entrypoint = create_or_get_entrypoints(
+        client, 1, name, description, task_graph,
+        create_parameters_object(client, parameters), queues, plugins
+    )
+    return entrypoint
+
+def add_missing_plugin_files(location, upload):
+    p = Path(location)
+    for child in p.iterdir():
+        if child.name.endswith('.py'):
+            if str(child) not in upload.keys():
+                upload[str(child)] = []
+    return upload
+
+def upload_experiment(client, entrypoint, entrypoint_name, entrypoint_desc, plugin_files, queue_name, queue_desc, experiment_name, experiment_desc):
+    upload = get_plugins_to_register(client, entrypoint, {})
+    upload = add_missing_plugin_files(plugin_files, upload)
+    queue = create_or_get_queue(client, 1, queue_name, queue_desc)
+    queues = [queue['id']]
+    plugins = register_plugins(client, 1, upload)
+    entrypoint = register_entrypoint(client, 1, entrypoint_name, entrypoint_desc, queues, plugins, entrypoint)
+    experiment = create_or_get_experiment(client, 1, experiment_name, experiment_desc, [entrypoint['id']])
+    return experiment['id'], entrypoint['id'], queue['id']
+
+def run_experiment(client, experiment_id, job_desc, queue_id, entrypoint_id, job_time_limit, parameters=None):
+    parameters = {} if parameters is None else parameters
+    return client.experiments.create_jobs_by_experiment_id(
+        experiment_id, job_desc, queue_id, entrypoint_id, parameters, job_time_limit
+    )
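+
+# Example (illustrative): a typical end-to-end flow with the helpers above,
+# assuming a logged-in Dioptra client; the file paths and the queue, entrypoint,
+# and experiment names below are placeholder assumptions, not fixed values.
+#
+#     experiment_id, entrypoint_id, queue_id = upload_experiment(
+#         client,
+#         entrypoint="src/train.yml",
+#         entrypoint_name="train",
+#         entrypoint_desc="train an MNIST classifier",
+#         plugin_files="../task-plugins/dioptra_custom/fgm_mnist_demo",
+#         queue_name="tensorflow_cpu",
+#         queue_desc="Tensorflow CPU queue",
+#         experiment_name="mnist",
+#         experiment_desc="MNIST demo experiment",
+#     )
+#     job = run_experiment(
+#         client, experiment_id, "training job", queue_id, entrypoint_id, "24h"
+#     )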
+
+def delete_all(client):
+    for d in client.experiments.get_all(pageLength=100000)['data']:
+        client.experiments.delete_by_id(d['id'])
+    for d in client.entrypoints.get_all(pageLength=100000)['data']:
+        client.entrypoints.delete_by_id(d['id'])
+    for d in client.jobs.get_all(pageLength=100000)['data']:
+        client.jobs.delete_by_id(d['id'])
+    for d in client.models.get_all(pageLength=100000)['data']:
+        client.models.delete_by_id(d['id'])
+    for d in client.plugins.get_all(pageLength=100000)['data']:
+        try:
+            client.plugins.delete_by_id(d['id'])
+        except Exception:
+            pass
+    for d in client.tags.get_all(pageLength=100000)['data']:
+        client.tags.delete_by_id(d['id'])
+    for d in client.pluginParameterTypes.get_all(pageLength=100000)['data']:
+        try:
+            client.pluginParameterTypes.delete_by_id(d['id'])
+        except Exception:
+            pass
+    for d in client.queues.get_all(pageLength=100000)['data']:
+        client.queues.delete_by_id(d['id'])
\ No newline at end of file
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_exceptions.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_exceptions.py
new file mode 100644
index 000000000..57d002ce1
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_exceptions.py
@@ -0,0 +1,23 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module of exceptions for the artifacts plugins collection."""
+
+from dioptra.sdk.exceptions.base import BaseTaskPluginError
+
+
+class UnsupportedDataFrameFileFormatError(BaseTaskPluginError):
+    """The requested data frame file format is not supported."""
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py
new file mode 100644
index 000000000..fa8831a57
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py
@@ -0,0 +1,225 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST.
Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for MLFlow artifacts management. + +This module contains a set of task plugins for managing artifacts generated during an +entry point run. +""" + +import tarfile +from pathlib import Path +from typing import Any, Callable, Dict, Optional, Union, List + +import mlflow +import os +import pandas as pd +import structlog +from mlflow.tracking import MlflowClient +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.utilities.paths import set_path_ext + +from .artifacts_restapi import upload_artifact_to_restapi +from .artifacts_exceptions import UnsupportedDataFrameFileFormatError + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +def download_all_artifacts( + uris: List[str], destinations: List[str|Path] +) -> List[str]: + download_paths = [] + for uri in uris: + for dest in destinations: + if uri.endswith(dest): + download_path: str = mlflow.artifacts.download_artifacts( + artifact_uri=uri + ) + LOGGER.info( + "Artifact downloaded from MLFlow run", + artifact_path=download_path + ) + download_paths += [download_path] + return download_paths + + +@pyplugs.register +def upload_data_frame_artifact( + data_frame: pd.DataFrame, + file_name: str, + file_format: str, + file_format_kwargs: Optional[Dict[str, Any]] = None, + working_dir: Optional[Union[str, Path]] = None, +) -> None: + """Uploads a :py:class:`~pandas.DataFrame` as an artifact of the active MLFlow run. + + The `file_format` argument selects the :py:class:`~pandas.DataFrame` serializer, + which are all handled using the object's `DataFrame.to_{format}` methods. The string + passed to `file_format` must match one of the following, + + - `csv[.bz2|.gz|.xz]` - A comma-separated values plain text file with optional + compression. + - `feather` - A binary feather file. + - `json` - A plain text JSON file. + - `pickle` - A binary pickle file. + + Args: + data_frame: A :py:class:`~pandas.DataFrame` to be uploaded. + file_name: The filename to use for the serialized :py:class:`~pandas.DataFrame`. + file_format: The :py:class:`~pandas.DataFrame` file serialization format. + file_format_kwargs: A dictionary of additional keyword arguments to pass to the + serializer. If `None`, then no additional keyword arguments are passed. The + default is `None`. + working_dir: The location where the file should be saved. If `None`, then the + current working directory is used. The default is `None`. + + Notes: + The :py:mod:`pyarrow` package must be installed in order to serialize to the + feather format. 
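+
+    Example:
+        A minimal sketch of a call to this plugin; the data frame contents and
+        file name are illustrative assumptions, and an active MLFlow run is
+        assumed::
+
+            import pandas as pd
+
+            df = pd.DataFrame({"image": ["0.png"], "l_inf_norm": [0.25]})
+            upload_data_frame_artifact(
+                data_frame=df,
+                file_name="distance_metrics",
+                file_format="csv.gz",
+            )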
+ + See Also: + - :py:meth:`pandas.DataFrame.to_csv` + - :py:meth:`pandas.DataFrame.to_feather` + - :py:meth:`pandas.DataFrame.to_json` + - :py:meth:`pandas.DataFrame.to_pickle` + """ + + def to_format( + data_frame: pd.DataFrame, format: str, output_dir: Union[str, Path] + ) -> Dict[str, Any]: + filepath: Path = Path(output_dir) / Path(file_name).name + format_funcs = { + "csv": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv"), + }, + "csv.bz2": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv.bz2"), + }, + "csv.gz": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv.gz"), + }, + "csv.xz": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv.xz"), + }, + "feather": { + "func": data_frame.to_feather, + "filepath": set_path_ext(filepath=filepath, ext="feather"), + }, + "json": { + "func": data_frame.to_json, + "filepath": set_path_ext(filepath=filepath, ext="json"), + }, + "pickle": { + "func": data_frame.to_pickle, + "filepath": set_path_ext(filepath=filepath, ext="pkl"), + }, + } + + func: Optional[Dict[str, Any]] = format_funcs.get(format) + + if func is None: + raise UnsupportedDataFrameFileFormatError( + f"Serializing data frames to the {file_format} format is not supported" + ) + + return func + + if file_format_kwargs is None: + file_format_kwargs = {} + + if working_dir is None: + working_dir = Path.cwd() + + working_dir = Path(working_dir) + format_dict: Dict[str, Any] = to_format( + data_frame=data_frame, format=file_format, output_dir=working_dir + ) + + df_to_format_func: Callable[..., None] = format_dict["func"] + df_artifact_path: Path = format_dict["filepath"] + + df_to_format_func(df_artifact_path, **file_format_kwargs) + LOGGER.info( + "Data frame saved to file", + file_name=df_artifact_path.name, + file_format=file_format, + ) + + upload_file_as_artifact(artifact_path=df_artifact_path) + + +@pyplugs.register +def upload_directory_as_tarball_artifact( + source_dir: Union[str, Path], + tarball_filename: str, + tarball_write_mode: str = "w:gz", + working_dir: Optional[Union[str, Path]] = None, +) -> None: + """Archives a directory and uploads it as an artifact of the active MLFlow run. + + Args: + source_dir: The directory which should be uploaded. + tarball_filename: The filename to use for the archived directory tarball. + tarball_write_mode: The write mode for the tarball, see :py:func:`tarfile.open` + for the full list of compression options. The default is `"w:gz"` (gzip + compression). + working_dir: The location where the file should be saved. If `None`, then the + current working directory is used. The default is `None`. + + See Also: + - :py:func:`tarfile.open` + """ + if working_dir is None: + working_dir = Path.cwd() + + source_dir = Path(source_dir) + working_dir = Path(working_dir) + tarball_path = working_dir / tarball_filename + + with tarfile.open(tarball_path, tarball_write_mode) as f: + f.add(source_dir, arcname=source_dir.name) + + LOGGER.info( + "Directory added to tar archive", + directory=source_dir, + tarball_path=tarball_path, + ) + + upload_file_as_artifact(artifact_path=tarball_path) + + +@pyplugs.register +def upload_file_as_artifact(artifact_path: Union[str, Path]) -> None: + """Uploads a file as an artifact of the active MLFlow run. + + Args: + artifact_path: The location of the file to be uploaded. 
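+
+    Example:
+        A minimal sketch; the file name is an illustrative assumption, and an
+        active MLFlow run is assumed::
+
+            upload_file_as_artifact(artifact_path="distance_metrics.csv.gz")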
+ + See Also: + - :py:func:`mlflow.log_artifact` + """ + artifact_path = Path(artifact_path) + mlflow.log_artifact(str(artifact_path)) + uri = mlflow.get_artifact_uri(str(artifact_path.name)) + upload_artifact_to_restapi(uri, os.environ['__JOB_ID']) + LOGGER.info("Artifact uploaded for current MLFlow run", filename=artifact_path.name) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_restapi.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_restapi.py new file mode 100644 index 000000000..51e0f1cfe --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_restapi.py @@ -0,0 +1,169 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +import requests +import structlog +import os +from dioptra import pyplugs +from structlog.stdlib import BoundLogger +from posixpath import join as urljoin +from urllib.parse import urlparse, urlunparse + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +@pyplugs.register +def get_uri_for_model(model_name, model_version=-1): + session, url = get_logged_in_session() + models = get(session, url, f'models?search={model_name}&pageLength=500') + for model in models['data']: + if (model['name'] == model_name): + model_id = model['id'] + if (model_version >= 0): + selected_model = get(session, url, + f'models/{model_id}/versions/{model_version}' + ) + else: + selected_model = model['latestVersion'] + + uri = selected_model['artifact']['artifactUri'] + return uri + +def get_uris_for_job(job_id): + session, url = get_logged_in_session() + job = get(session, url, 'jobs', str(job_id)) + return [artifact['artifactUri'] for artifact in job['artifacts']] + +def get_uris_for_artifacts(artifact_ids): + session, url = get_logged_in_session() + return [get(session, url, 'artifacts', aid) for aid in artifact_ids] + +def get_logged_in_session(): + session = requests.Session() + url = "http://dioptra-deployment-restapi:5000/api/v1" + + login = post(session, url, { + 'username':os.environ['DIOPTRA_WORKER_USERNAME'], + 'password':os.environ['DIOPTRA_WORKER_PASSWORD']}, + 'auth', 'login') + LOGGER.info("login request sent", response=str(login)) + + return session, url + +def upload_model_to_restapi(name, source_uri, job_id): + version = 0 + model_id = 0 + + session, url = get_logged_in_session() + + models = get(session, url, f'models?search={name}&pageLength=500') + LOGGER.info("requesting models from RESTAPI", response=models) + + + for model in 
models['data']:
+        # check whether to create a new model
+        if model['name'] == name:
+            model_id = model['id']
+            if model['latestVersion'] is not None:
+                version = model['latestVersion']['versionNumber'] + 1
+    if version == 0 and model_id == 0:
+        LOGGER.info("creating new model on RESTAPI")
+        model = post(session, url, {"group": 1, "name": name, "description": f"{name} model"}, "models")
+        model_id = model['id']
+        LOGGER.info("new model created", response=model)
+
+    artifact = post(session, url, {"group": 1, "description": f"{name} model artifact", "job": str(job_id), "uri": source_uri}, 'artifacts')
+    LOGGER.info("artifact", response=artifact)
+    model_version = post(session, url, {"description": f"{name} model version", "artifact": artifact['id']}, 'models', str(model_id), 'versions')
+    LOGGER.info("model created", response=model_version)
+
+def upload_artifact_to_restapi(source_uri, job_id):
+    session, url = get_logged_in_session()
+
+    artifact = post(session, url, {"group": 1, "description": f"artifact for job {job_id}", "job": str(job_id), "uri": source_uri}, 'artifacts')
+    LOGGER.info("artifact", response=artifact)
+
+def debug_request(url, method, data=None):
+    LOGGER.debug("Request made.", url=url, method=method, data=data)
+
+
+def debug_response(json):
+    LOGGER.debug("Response received.", json=json)
+
+
+def get(session, endpoint, *features):
+    debug_request(urljoin(endpoint, *features), "GET")
+    return make_request(session, "get", endpoint, None, *features)
+
+
+def post(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "POST", data)
+    return make_request(session, "post", endpoint, data, *features)
+
+
+def delete(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "DELETE", data)
+    return make_request(session, "delete", endpoint, data, *features)
+
+
+def put(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "PUT", data)
+    return make_request(session, "put", endpoint, data, *features)
+
+
+def make_request(session, method_name, endpoint, data, *features):
+    url = urljoin(endpoint, *features)
+    method = getattr(session, method_name)
+    response = None
+    try:
+        if data:
+            response = method(url, json=data)
+        else:
+            response = method(url)
+        if response.status_code != 200:
+            raise StatusCodeError()
+        json = response.json()
+    except (requests.ConnectionError, StatusCodeError, requests.JSONDecodeError) as e:
+        handle_error(session, url, method_name.upper(), data, response, e)
+    debug_response(json=json)
+    return json
+
+
+def handle_error(session, url, method, data, response, error):
+    if type(error) is requests.ConnectionError:
+        restapi = os.environ["DIOPTRA_RESTAPI_URI"]
+        message = (
+            f"Could not connect to the REST API. Is the server running at {restapi}?"
+        )
+        LOGGER.error(
+            message,
+            url=url,
+            method=method,
+            data=data,
+            response=response.text if response is not None else None,
+        )
+        raise APIConnectionError(message)
+    if type(error) is StatusCodeError:
+        message = f"Error code {response.status_code} returned."
+        LOGGER.error(message, url=url, method=method, data=data, response=response.text)
+        raise StatusCodeError(message)
+    if type(error) is requests.JSONDecodeError:
+        message = "JSON response could not be decoded."
+ LOGGER.error(message, url=url, method=method, data=data, response=response.text) + raise JSONDecodeError(message) + +class APIConnectionError(Exception): + """Class for connection errors""" + + +class StatusCodeError(Exception): + """Class for status code errors""" + + +class JSONDecodeError(Exception): + """Class for JSON decode errors""" diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_utils.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_utils.py new file mode 100644 index 000000000..37404c7ae --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_utils.py @@ -0,0 +1,117 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module containing generic utilities for managing artifacts.""" + +from __future__ import annotations + +import os +import tarfile +import uuid +from pathlib import Path +from tarfile import TarFile +from typing import Any, List, Union + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +def is_within_directory(directory: Union[str, Path], target: Union[str, Path]) -> bool: + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + + return prefix == abs_directory + + +def safe_extract(tar: TarFile, path: Union[str, Path] = ".") -> None: + for member in tar.getmembers(): + member_path = os.path.join(path, member.name) + if not is_within_directory(path, member_path): + raise Exception("Attempted Path Traversal in Tar File") + + tar.extractall(path, members=None, numeric_owner=False) + + +@pyplugs.register +def extract_tarfile( + filepath: Union[str, Path], + tarball_read_mode: str = "r:gz", + output_dir: Any = None, +) -> None: + """Extracts a tarball archive into the current working directory. + + Args: + filepath: The location of the tarball archive file provided as a string or a + :py:class:`~pathlib.Path` object. + tarball_read_mode: The read mode for the tarball, see :py:func:`tarfile.open` + for the full list of compression options. The default is `"r:gz"` (gzip + compression). 
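+        output_dir: The directory the archive is extracted into. If `None`, then the
+            current working directory is used. The default is `None`.
+
+    Example:
+        A minimal sketch; the archive name and output directory are illustrative
+        assumptions::
+
+            extract_tarfile("adversarial_fgm.tar.gz", output_dir="adv_testing")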
+ + See Also: + - :py:func:`tarfile.open` + """ + output_dir = Path(output_dir) if output_dir is not None else Path.cwd() + + filepath = Path(filepath) + with tarfile.open(filepath, tarball_read_mode) as f: + safe_extract(f, path=output_dir) + + +@pyplugs.register +def make_directories(dirs: List[Union[str, Path]]) -> None: + """Creates directories if they do not exist. + + Args: + dirs: A list of directories provided as strings or :py:class:`~pathlib.Path` + objects. + """ + for d in dirs: + d = Path(d) + d.mkdir(parents=True, exist_ok=True) + LOGGER.info("Directory created", directory=d) + + +@pyplugs.register +def extract_tarfile_in_unique_subdir( + filepath: Union[str, Path], + tarball_read_mode: str = "r:gz", +) -> Path: + """Extracts a tarball archive into a unique subdirectory of the + current working directory. + + Args: + filepath: The location of the tarball archive file provided as a string or a + :py:class:`~pathlib.Path` object. + tarball_read_mode: The read mode for the tarball, see :py:func:`tarfile.open` + for the full list of compression options. The default is `"r:gz"` (gzip + compression). + + See Also: + - :py:func:`tarfile.open` + """ + output_dir = Path(uuid.uuid4().hex) + output_dir.mkdir(mode=0o700, parents=True, exist_ok=True) + + filepath = Path(filepath) + with tarfile.open(filepath, tarball_read_mode) as f: + safe_extract(f, path=output_dir) + return output_dir diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py new file mode 100644 index 000000000..3aa5c3493 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py @@ -0,0 +1,295 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for the Fast Gradient Method evasion attack. + +The Fast Gradient Method (FGM) [goodfellow2015]_ is an evasion attack that attempts to +fool a trained classifier by perturbing a test image using the gradient of the +classifier's neural network. This task plugin uses the Adversarial Robustness Toolbox's +[art2019]_ implementation of the |fgm_art|. + +References: + .. [art2019] M.-I. Nicolae et al., "Adversarial Robustness Toolbox v1.0.0," + Nov. 2019. [Online]. Available: + `arXiv:1807.01069v4 [cs.LG] `_. + + .. [goodfellow2015] I. Goodfellow, J. Shlens, and C. Szegedy. (May 2015). + Explaining and Harnessing Adversarial Examples, Presented at the Int. Conf. + on Learn. 
Represent. 2015, San Diego, California, United States. [Online].
+        Available: `arXiv:1412.6572v3 [stat.ML] `_.
+
+.. |fgm_art| replace:: `Fast Gradient Method `__
+"""
+
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any, Callable, Dict, List, Optional, Tuple, Union
+
+import mlflow
+import numpy as np
+import pandas as pd
+import scipy.stats
+import structlog
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError
+from dioptra.sdk.utilities.decorators import require_package
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+try:
+    from art.attacks.evasion import FastGradientMethod
+    from art.estimators.classification import TensorFlowV2Classifier
+
+except ImportError:  # pragma: nocover
+    LOGGER.warn(
+        "Unable to import one or more optional packages, functionality may be reduced",
+        package="art",
+    )
+
+
+try:
+    from tensorflow.keras.preprocessing.image import ImageDataGenerator, save_img
+
+except ImportError:  # pragma: nocover
+    LOGGER.warn(
+        "Unable to import one or more optional packages, functionality may be reduced",
+        package="tensorflow",
+    )
+
+
+@pyplugs.register
+@require_package("art", exc_type=ARTDependencyError)
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def fgm(
+    data_flow: Any,
+    data_dir: str,
+    adv_data_dir: Union[str, Path],
+    keras_classifier: TensorFlowV2Classifier,
+    image_size: Tuple[int, int, int],
+    distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None,
+    rescale: float = 1.0 / 255,
+    batch_size: int = 32,
+    label_mode: str = "categorical",
+    eps: float = 0.3,
+    eps_step: float = 0.1,
+    minimal: bool = False,
+    norm: Union[int, float, str] = np.inf,
+) -> pd.DataFrame:
+    """Generates an adversarial dataset using the Fast Gradient Method attack.
+
+    Each generated adversarial image is saved as an image file in the directory
+    specified by `adv_data_dir` and the distance metric functions passed to
+    `distance_metrics_list` are used to quantify the size of the perturbation applied
+    to each image.
+
+    Args:
+        data_flow: A directory iterator yielding the batches of clean test images
+            and labels used to generate the adversarial examples.
+        data_dir: The directory containing the clean test images.
+        adv_data_dir: The directory to use when saving the generated adversarial
+            images.
+        keras_classifier: A trained :py:class:`~art.estimators.classification\\
+            .TensorFlowV2Classifier`.
+        image_size: A tuple of integers `(height, width, channels)` used to preprocess
+            the images so that they all have the same dimensions and number of color
+            channels. `channels=3` means RGB color images and `channels=1` means
+            grayscale images. Images with different dimensions will be resized. If
+            `channels=1`, color images will be converted into grayscale.
+        distance_metrics_list: A list of distance metrics to compute after generating
+            an adversarial image. If `None`, then no distance metrics will be
+            calculated. The default is `None`.
+        rescale: The rescaling factor for the pixel vectors. If `None` or `0`, no
+            rescaling is applied, otherwise multiply the data by the value provided
+            (after applying all other transformations). The default is `1.0 / 255`.
+        batch_size: The size of the batch on which adversarial samples are generated.
+            The default is `32`.
+        label_mode: Determines how the label arrays for the dataset will be returned.
+            The available choices are: `"categorical"`, `"binary"`, `"sparse"`,
+            `"input"`, `None`. For information on the meaning of each choice, see
+            the documentation for |flow_from_directory|.
The default is `"categorical"`. + eps: The attack step size. The default is `0.3`. + eps_step: The step size of the input variation for minimal perturbation + computation. The default is `0.1`. + minimal: If `True`, compute the minimal perturbation, and use `eps_step` for the + step size and `eps` for the maximum perturbation. The default is `False`. + norm: The norm of the adversarial perturbation. Can be `"inf"`, + :py:data:`numpy.inf`, `1`, or `2`. The default is :py:data:`numpy.inf`. + + Returns: + A :py:class:`~pandas.DataFrame` containing the full distribution of the + calculated distance metrics. + + See Also: + - |flow_from_directory| + + .. |flow_from_directory| replace:: :py:meth:`tf.keras.preprocessing.image\\ + .ImageDataGenerator.flow_from_directory` + """ + distance_metrics_list = distance_metrics_list or [] + adv_data_dir = Path(adv_data_dir) + + attack = _init_fgm( + keras_classifier=keras_classifier, + batch_size=batch_size, + eps=eps, + eps_step=eps_step, + minimal=minimal, + norm=norm, + ) + print(data_flow) + + num_images = data_flow.n + img_filenames = [Path(x) for x in data_flow.filenames] + + distance_metrics_: Dict[str, List[List[float]]] = {"image": [], "label": []} + for metric_name, _ in distance_metrics_list: + distance_metrics_[metric_name] = [] + + LOGGER.info( + "Generate adversarial images", + attack="fgm", + num_batches=num_images // batch_size, + ) + + for batch_num, (x, y) in enumerate(data_flow): + if batch_num >= num_images // batch_size: + break + + clean_filenames = img_filenames[ + batch_num * batch_size : (batch_num + 1) * batch_size # noqa: E203 + ] + + LOGGER.info( + "Generate adversarial image batch", + attack="fgm", + batch_num=batch_num, + ) + + y_int = np.argmax(y, axis=1) + adv_batch = attack.generate(x=x) + + _save_adv_batch(adv_batch, adv_data_dir, y_int, clean_filenames) + + _evaluate_distance_metrics( + clean_filenames=clean_filenames, + distance_metrics_=distance_metrics_, + clean_batch=x, + adv_batch=adv_batch, + distance_metrics_list=distance_metrics_list, + ) + + LOGGER.info("Adversarial image generation complete", attack="fgm") + _log_distance_metrics(distance_metrics_) + + return pd.DataFrame(distance_metrics_) + + +def _init_fgm( + keras_classifier: TensorFlowV2Classifier, batch_size: int, **kwargs +) -> FastGradientMethod: + """Initializes :py:class:`~art.attacks.evasion.FastGradientMethod`. + + Args: + keras_classifier: A trained :py:class:`~art.estimators.classification\\ + .TensorFlowV2Classifier`. + batch_size: The size of the batch on which adversarial samples are generated. + + Returns: + A :py:class:`~art.attacks.evasion.FastGradientMethod` object. + """ + attack: FastGradientMethod = FastGradientMethod( + estimator=keras_classifier, batch_size=batch_size, **kwargs + ) + return attack + + +def _save_adv_batch(adv_batch, adv_data_dir, y, clean_filenames) -> None: + """Saves a batch of adversarial images to disk. + + Args: + adv_batch: A generated batch of adversarial images. + adv_data_dir: The directory to use when saving the generated adversarial images. + y: An array containing the target labels of the original images. + clean_filenames: A list containing the filenames of the original images. 
+ """ + for batch_image_num, adv_image in enumerate(adv_batch): + adv_image_path = ( + adv_data_dir + / f"{y[batch_image_num]}" + / f"adv_{clean_filenames[batch_image_num].name}" + ) + + if not adv_image_path.parent.exists(): + adv_image_path.parent.mkdir(parents=True) + + save_img(path=str(adv_image_path), x=adv_image) + + +def _evaluate_distance_metrics( + clean_filenames, distance_metrics_, clean_batch, adv_batch, distance_metrics_list +) -> None: + """Calculates distance metrics for a batch of clean/adversarial image pairs. + + Args: + clean_filenames: A list containing the filenames of the original images. + distance_metrics_: A dictionary used to record the values of the distance + metrics computed for the clean/adversarial image pairs. + clean_batch: The clean images used to generate the adversarial images in + `adv_batch`. + adv_batch: A generated batch of adversarial images. + distance_metrics_list: A list of distance metrics to compute after generating an + adversarial image. + """ + LOGGER.debug("evaluate image perturbations using distance metrics") + distance_metrics_["image"].extend([x.name for x in clean_filenames]) + distance_metrics_["label"].extend([x.parent for x in clean_filenames]) + for metric_name, metric in distance_metrics_list: + distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch)) + + +def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> None: + """Logs the distance metrics summary statistics to the MLFlow Tracking service. + + The following summary statistics are calculated and logged to the MLFlow Tracking + service for each of the distributions recorded in the `distance_metrics_` + dictionary: + + - mean + - median + - standard deviation + - interquartile range + - minimum + - maximum + + Args: + distance_metrics_: A dictionary used to record the values of the distance + metrics computed for the clean/adversarial image pairs. + """ + distance_metrics_ = distance_metrics_.copy() + del distance_metrics_["image"] + del distance_metrics_["label"] + for metric_name, metric_values_list in distance_metrics_.items(): + metric_values = np.array(metric_values_list) + mlflow.log_metric(key=f"{metric_name}_mean", value=metric_values.mean()) + mlflow.log_metric(key=f"{metric_name}_median", value=np.median(metric_values)) + mlflow.log_metric(key=f"{metric_name}_stdev", value=metric_values.std()) + mlflow.log_metric( + key=f"{metric_name}_iqr", value=scipy.stats.iqr(metric_values) + ) + mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) + mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) + LOGGER.info("logged distance-based metric", metric_name=metric_name) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/backend_configs_tensorflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/backend_configs_tensorflow.py new file mode 100644 index 000000000..10ca767e5 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/backend_configs_tensorflow.py @@ -0,0 +1,52 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. 
Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for initializing and configuring Tensorflow.""" + +from __future__ import annotations + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +try: + import tensorflow as tf + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def init_tensorflow(seed: int) -> None: + """Initializes Tensorflow to ensure reproducibility. + + This task plugin **must** be run before any other features from Tensorflow are used + to ensure reproducibility. + + Args: + seed: The seed to use for Tensorflow's random number generator. + """ + tf.random.set_seed(seed) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py new file mode 100644 index 000000000..8b16d6804 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py @@ -0,0 +1,130 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for preparing Tensorflow-specific dataset iterators. + +.. |flow_from_directory| replace:: :py:meth:`tensorflow.keras.preprocessing.image\\ + .ImageDataGenerator.flow_from_directory` +.. 
|directory_iterator| replace:: :py:class:`~tensorflow.keras.preprocessing.image\\ + .DirectoryIterator` +""" + +from __future__ import annotations + +from typing import Optional, Tuple + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.preprocessing.image import ( + DirectoryIterator, + ImageDataGenerator, + ) + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def create_image_dataset( + data_dir: str, + subset: Optional[str], + image_size: Tuple[int, int, int], + seed: int, + rescale: float = 1.0 / 255, + validation_split: Optional[float] = 0.2, + batch_size: int = 32, + label_mode: str = "categorical", + shuffle: bool = True, +) -> DirectoryIterator: + """Yields an iterator for generating batches of real-time augmented image data. + + Args: + data_dir: The directory containing the image dataset. + subset: The subset of data (`"training"` or `"validation"`) to use if + `validation_split` is not `None`. If `None`, then `validation_split` must + also be `None`. + image_size: A tuple of integers `(height, width, channels)` used to preprocess + the images so that they all have the same dimensions and number of color + channels. `channels=3` means RGB color images and `channels=1` means + grayscale images. Images with different dimensions will be resized. If + `channels=1`, color images will be converted into grayscale. + seed: Sets the random seed used for shuffling and transformations. + rescale: The rescaling factor for the pixel vectors. If `None` or `0`, no + rescaling is applied, otherwise multiply the data by the value provided + (after applying all other transformations). The default is `1.0 / 255`. + validation_split: The fraction of the data to set aside for validation. If not + `None`, the value given here must be between `0` and `1`. If `None`, then + there is no validation set. The default is `0.2`. + batch_size: The size of the batch on which adversarial samples are generated. + The default is `32`. + label_mode: Determines how the label arrays for the dataset will be returned. + The available choices are: `"categorical"`, `"binary"`, `"sparse"`, + `"input"`, `None`. For information on the meaning of each choice, see + the documentation for |flow_from_directory|. The default is `"categorical"`. + + Returns: + A :py:class:`~tensorflow.keras.preprocessing.image.DirectoryIterator` object. 
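+
+ Examples:
+ A minimal sketch of loading the validation split of an MNIST-style
+ directory tree (the data path is an assumption for illustration)::
+
+ >>> val_ds = create_image_dataset( # doctest: +SKIP
+ ... data_dir="/dioptra/data/Mnist/training",
+ ... subset="validation",
+ ... image_size=(28, 28, 1),
+ ... seed=42,
+ ... )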
+ + See Also: + - |flow_from_directory| + - :py:class:`~tensorflow.keras.preprocessing.image.DirectoryIterator` + """ + color_mode: str = ( + "rgb" if image_size[2] == 3 else "rgba" if image_size[2] == 4 else "grayscale" + ) + target_size: Tuple[int, int] = image_size[:2] + + data_generator: ImageDataGenerator = ImageDataGenerator( + rescale=rescale, + validation_split=validation_split, + ) + + return data_generator.flow_from_directory( + directory=data_dir, + target_size=target_size, + color_mode=color_mode, + class_mode=label_mode, + batch_size=batch_size, + seed=seed, + subset=subset, + shuffle=shuffle + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_n_classes_from_directory_iterator(ds: DirectoryIterator) -> int: + """Returns the number of unique labels found by the |directory_iterator|. + + Args: + ds: A |directory_iterator| object. + + Returns: + The number of unique labels in the dataset. + """ + return len(ds.class_indices) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_keras_classifiers.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_keras_classifiers.py new file mode 100644 index 000000000..09ce35aa6 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_keras_classifiers.py @@ -0,0 +1,230 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""Neural network image classifiers implemented in Tensorflow/Keras.""" + +from __future__ import annotations + +from types import FunctionType +from typing import Callable, Dict, List, Tuple, Union + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.layers import ( + BatchNormalization, + Conv2D, + Dense, + Dropout, + Flatten, + MaxPooling2D, + ) + from tensorflow.keras.metrics import Metric + from tensorflow.keras.models import Sequential + from tensorflow.keras.optimizers import Optimizer + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def init_classifier( + model_architecture: str, + optimizer: Optimizer, + metrics: List[Union[Metric, FunctionType]], + input_shape: Tuple[int, int, int], + n_classes: int, + loss: str = "categorical_crossentropy", +) -> Sequential: + """Initializes an untrained neural network image classifier for Tensorflow/Keras. + + The `model_architecture` argument is used to select a neural network architecture + from the architecture registry. The string passed to `model_architecture` must match + one of the following, + + - `"shallow_net"` - A shallow neural network architecture. + - `"le_net"` - The LeNet-5 convolutional neural network architecture. + - `"alex_net"` - The AlexNet convolutional neural network architecture. + + Args: + model_architecture: The neural network architecture to use. + optimizer: A Keras :py:class:`~tf.keras.optimizers.Optimizer` providing an + algorithm to use to train the estimator, such as + :py:class:`~tf.keras.optimizers.SGD` and + :py:class:`~tf.keras.optimizers.Adam`. + metrics: A list of metrics to be evaluated by the model during training and + testing. + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. + n_classes: The number of target labels in the dataset. + loss: A string specifying the loss function to be minimized during training. The + string must match the name of one of the loss functions in the + :py:mod:`tf.keras.losses` module. The default is + `"categorical_crossentropy"`. + + Returns: + A compiled :py:class:`~tf.keras.Sequential` object. + + See Also: + - :py:mod:`tf.keras.losses` + - :py:mod:`tf.keras.optimizers` + - :py:class:`tf.keras.Sequential` + """ + classifier: Sequential = KERAS_CLASSIFIERS_REGISTRY[model_architecture]( + input_shape, + n_classes, + ) + classifier.compile(loss=loss, optimizer=optimizer, metrics=metrics) + return classifier + + +def shallow_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: + """Builds an untrained shallow neural network architecture for Tensorflow/Keras. + + Args: + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. 
+ n_classes: The number of target labels in the dataset. + + Returns: + A :py:class:`~tf.keras.Sequential` object. + + See Also: + - :py:class:`tf.keras.Sequential` + """ + model = Sequential() + + # Flatten inputs + model.add(Flatten(input_shape=input_shape)) + + # single hidden layer: + model.add(Dense(32, activation="sigmoid")) + + # output layer: + model.add(Dense(n_classes, activation="softmax")) + + return model + + +def le_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: + """Builds an untrained LeNet-5 neural network architecture for Tensorflow/Keras. + + Args: + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. + n_classes: The number of target labels in the dataset. + + Returns: + A :py:class:`~tf.keras.Sequential` object. + + See Also: + - :py:class:`tf.keras.Sequential` + """ + model = Sequential() + + # first convolutional layer: + model.add( + Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape) + ) + + # second conv layer, with pooling and dropout: + model.add(Conv2D(64, kernel_size=(3, 3), activation="relu")) + model.add(MaxPooling2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + model.add(Flatten()) + + # dense hidden layer, with dropout: + model.add(Dense(128, activation="relu")) + model.add(Dropout(0.5)) + + # output layer: + model.add(Dense(n_classes, activation="softmax")) + + return model + + +def alex_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: + """Builds an untrained AlexNet neural network architecture for Tensorflow/Keras. + + Args: + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. + n_classes: The number of target labels in the dataset. + + Returns: + A :py:class:`~tf.keras.Sequential` object. 
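+
+ Note:
+ The stacked 11x11, 5x5, and 3x3 convolutions with 3x3 poolings shrink the
+ spatial dimensions quickly, so this architecture assumes inputs much larger
+ than MNIST's 28x28 (ImageNet-sized shapes such as `(224, 224, 3)` are
+ typical); Keras raises an error if a feature map collapses to zero size.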
+ + See Also: + - :py:class:`tf.keras.Sequential` + """ + model = Sequential() + + # first conv-pool block: + model.add( + Conv2D( + 96, + kernel_size=(11, 11), + strides=(4, 4), + activation="relu", + input_shape=input_shape, + ) + ) + model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) + model.add(BatchNormalization()) + + # second conv-pool block: + model.add(Conv2D(256, kernel_size=(5, 5), activation="relu")) + model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) + model.add(BatchNormalization()) + + # third conv-pool block: + model.add(Conv2D(256, kernel_size=(3, 3), activation="relu")) + model.add(Conv2D(384, kernel_size=(3, 3), activation="relu")) + model.add(Conv2D(384, kernel_size=(3, 3), activation="relu")) + model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) + model.add(BatchNormalization()) + + # dense layers: + model.add(Flatten()) + model.add(Dense(4096, activation="tanh")) + model.add(Dropout(0.5)) + model.add(Dense(4096, activation="tanh")) + model.add(Dropout(0.5)) + + # output layer: + model.add(Dense(n_classes, activation="softmax")) + + return model + + +KERAS_CLASSIFIERS_REGISTRY: Dict[ + str, Callable[[Tuple[int, int, int], int], Sequential] +] = dict(shallow_net=shallow_net, le_net=le_net, alex_net=alex_net) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_methods.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_methods.py new file mode 100644 index 000000000..28396c530 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/estimators_methods.py @@ -0,0 +1,122 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +import datetime +from typing import Any, Dict, Optional + +import mlflow +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.generics import estimator_predict, fit_estimator + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +def fit( + estimator: Any, + x: Any = None, + y: Any = None, + fit_kwargs: Optional[Dict[str, Any]] = None, +) -> Any: + """Fits the estimator to the given data. + + This task plugin wraps :py:func:`~dioptra.sdk.generics.fit_estimator`, which is a + generic function that uses multiple argument dispatch to handle the estimator + fitting method for different machine learning libraries. 
The modules attached to the + advertised plugin entry point `dioptra.generics.fit_estimator` are used to build the + function dispatch registry at runtime. For more information on the supported fitting + methods and `fit_kwargs` arguments, please refer to the documentation of the + registered dispatch functions. + + Args: + estimator: The model to be trained. + x: The input data to be used for training. + y: The target data to be used for training. + fit_kwargs: An optional dictionary of keyword arguments to pass to the + dispatched function. + + Returns: + The object returned by the estimator's fitting function. For further details on + the type of object this method can return, see the documentation for the + registered dispatch functions. + + See Also: + - :py:func:`dioptra.sdk.generics.fit_estimator` + """ + fit_kwargs = fit_kwargs or {} + time_start: datetime.datetime = datetime.datetime.now() + + LOGGER.info( + "Begin estimator fit", + timestamp=time_start.isoformat(), + ) + + estimator_fit_result: Any = fit_estimator(estimator, x, y, **fit_kwargs) + + time_end: datetime.datetime = datetime.datetime.now() + + total_seconds: float = (time_end - time_start).total_seconds() + total_minutes: float = total_seconds / 60 + + mlflow.log_metric("training_time_in_minutes", total_minutes) + LOGGER.info( + "Estimator fit complete", + timestamp=time_end.isoformat(), + total_minutes=total_minutes, + ) + + return estimator_fit_result + + +@pyplugs.register +def predict( + estimator: Any, + x: Any = None, + predict_kwargs: Optional[Dict[str, Any]] = None, +) -> Any: + """Uses the estimator to make predictions on the given input data. + + This task plugin wraps :py:func:`~dioptra.sdk.generics.estimator_predict`, which is + a generic function that uses multiple argument dispatch to handle estimator + prediction methods for different machine learning libraries. The modules attached to + the advertised plugin entry point `dioptra.generics.estimator_predict` are used to + build the function dispatch registry at runtime. For more information on the + supported prediction methods and `predict_kwargs` arguments, refer to the + documentation of the registered dispatch functions. + + Args: + estimator: A trained model to be used to generate predictions. + x: The input data for which to generate predictions. + predict_kwargs: An optional dictionary of keyword arguments to pass to the + dispatched function. + + Returns: + The object returned by the estimator's predict function. For further details on + the type of object this method can return, see the documentation for the + registered dispatch functions. + + See Also: + - :py:func:`dioptra.sdk.generics.estimator_predict` + """ + predict_kwargs = predict_kwargs or {} + prediction: Any = estimator_predict(estimator, x, **predict_kwargs) + + return prediction diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/import_keras.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/import_keras.py new file mode 100644 index 000000000..b5d03b51c --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/import_keras.py @@ -0,0 +1,65 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. 
Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +import importlib +from types import FunctionType, ModuleType +from typing import Union + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.callbacks import Callback + from tensorflow.keras.metrics import Metric + from tensorflow.keras.optimizers import Optimizer + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + +KERAS_CALLBACKS: str = "tensorflow.keras.callbacks" +KERAS_METRICS: str = "tensorflow.keras.metrics" +KERAS_OPTIMIZERS: str = "tensorflow.keras.optimizers" + + +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_callback(callback_name: str) -> Callback: + keras_callbacks: ModuleType = importlib.import_module(KERAS_CALLBACKS) + callback: Callback = getattr(keras_callbacks, callback_name) + return callback + + +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_metric(metric_name: str) -> Union[Metric, FunctionType]: + keras_metrics: ModuleType = importlib.import_module(KERAS_METRICS) + metric: Metric = getattr(keras_metrics, metric_name) + return metric + + +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_optimizer(optimizer_name: str) -> Optimizer: + keras_optimizers: ModuleType = importlib.import_module(KERAS_OPTIMIZERS) + optimizer: Optimizer = getattr(keras_optimizers, optimizer_name) + return optimizer diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_distance.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_distance.py new file mode 100644 index 000000000..034f5a02c --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_distance.py @@ -0,0 +1,307 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for getting functions from a distance metric registry.
+
+.. |Linf| replace:: L\\ :sub:`∞`
+.. |L1| replace:: L\\ :sub:`1`
+.. |L2| replace:: L\\ :sub:`2`
+"""
+
+from __future__ import annotations
+
+from typing import Any, Callable, Dict, List, Optional, Tuple
+
+import numpy as np
+import structlog
+from scipy.stats import wasserstein_distance
+from sklearn.metrics.pairwise import paired_distances
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+
+from .metrics_exceptions import UnknownDistanceMetricError
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+@pyplugs.register
+def get_distance_metric_list(
+ request: List[Dict[str, str]]
+) -> List[Tuple[str, Callable[..., np.ndarray]]]:
+ """Gets multiple distance metric functions from the registry.
+
+ The following metrics are available in the registry,
+
+ - `l_inf_norm`
+ - `l_1_norm`
+ - `l_2_norm`
+ - `paired_cosine_similarities`
+ - `paired_euclidean_distances`
+ - `paired_manhattan_distances`
+ - `paired_wasserstein_distances`
+
+ Args:
+ request: A list of dictionaries with the keys `name` and `func`. The `func` key
+ is used to look up the metric function in the registry and must match one of
+ the metric names listed above. The `name` key is a human-readable label for
+ the metric function.
+
+ Returns:
+ A list of tuples with two elements. The first element of each tuple is the label
+ from the `name` key of `request`, and the second element is the callable metric
+ function.
+ """
+ distance_metrics_list: List[Tuple[str, Callable[..., np.ndarray]]] = []
+
+ for metric in request:
+ metric_callable: Optional[Callable[..., np.ndarray]] = (
+ DISTANCE_METRICS_REGISTRY.get(metric["func"])
+ )
+
+ if metric_callable is not None:
+ distance_metrics_list.append((metric["name"], metric_callable))
+
+ else:
+ LOGGER.warn(
+ "Distance metric not in registry, skipping...",
+ name=metric["name"],
+ func=metric["func"],
+ )
+
+ return distance_metrics_list
+
+
+@pyplugs.register
+def get_distance_metric(func: str) -> Callable[..., np.ndarray]:
+ """Gets a distance metric function from the registry.
+
+ The following metrics are available in the registry,
+
+ - `l_inf_norm`
+ - `l_1_norm`
+ - `l_2_norm`
+ - `paired_cosine_similarities`
+ - `paired_euclidean_distances`
+ - `paired_manhattan_distances`
+ - `paired_wasserstein_distances`
+
+ Args:
+ func: A string that identifies the distance metric to return from the registry.
+ The string must match one of the names of the metrics in the registry.
+
+ Returns:
+ A callable distance metric function.
+ """
+ metric_callable: Optional[Callable[..., np.ndarray]] = (
+ DISTANCE_METRICS_REGISTRY.get(func)
+ )
+
+ if metric_callable is None:
+ LOGGER.error(
+ "Distance metric not in registry",
+ func=func,
+ )
+ raise UnknownDistanceMetricError(
+ f"Could not find any distance metric named {func!r} in the metrics "
+ "plugin collection. Check spelling and try again."
+ )
+
+ return metric_callable
+
+
+def l_inf_norm(y_true, y_pred) -> np.ndarray:
+ """Calculates the |Linf| norm between a batch of two matrices.
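+
+ For each clean/adversarial pair the matrices are flattened and the largest
+ absolute elementwise difference is returned, i.e. ``max(abs(y_true - y_pred))``
+ per sample.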
+ + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of |Linf| norms. + """ + metric: np.ndarray = _matrix_difference_l_norm( + y_true=y_true, y_pred=y_pred, order=np.inf + ) + return metric + + +def l_1_norm(y_true, y_pred) -> np.ndarray: + """Calculates the |L1| norm between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of |L1| norms. + """ + metric: np.ndarray = _matrix_difference_l_norm( + y_true=y_true, y_pred=y_pred, order=1 + ) + return metric + + +def l_2_norm(y_true, y_pred) -> np.ndarray: + """Calculates the |L2| norm between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of |L2| norms. + """ + metric: np.ndarray = _matrix_difference_l_norm( + y_true=y_true, y_pred=y_pred, order=2 + ) + return metric + + +def paired_cosine_similarities(y_true, y_pred) -> np.ndarray: + """Calculates the cosine similarity between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of cosine similarities. + """ + y_true_normalized: np.ndarray = _normalize_batch(_flatten_batch(y_true), order=2) + y_pred_normalized: np.ndarray = _normalize_batch(_flatten_batch(y_pred), order=2) + metric: np.ndarray = np.sum(y_true_normalized * y_pred_normalized, axis=1) + return metric + + +def paired_euclidean_distances(y_true, y_pred) -> np.ndarray: + """Calculates the Euclidean distance between a batch of two matrices. + + The Euclidean distance is equivalent to the |L2| norm. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of euclidean distances. + """ + metric: np.ndarray = l_2_norm(y_true=y_true, y_pred=y_pred) + return metric + + +def paired_manhattan_distances(y_true, y_pred) -> np.ndarray: + """Calculates the Manhattan distance between a batch of two matrices. + + The Manhattan distance is equivalent to the |L1| norm. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of Manhattan distances. + """ + metric: np.ndarray = l_1_norm(y_true=y_true, y_pred=y_pred) + return metric + + +def paired_wasserstein_distances(y_true, y_pred, **kwargs) -> np.ndarray: + """Calculates the Wasserstein distance between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of Wasserstein distances. 
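+
+ Examples:
+ An illustrative sketch with toy arrays (the shapes and values are
+ assumptions for demonstration, not taken from the demo)::
+
+ >>> import numpy as np
+ >>> clean = np.zeros((2, 4, 4))
+ >>> adv = np.ones((2, 4, 4))
+ >>> paired_wasserstein_distances(clean, adv) # doctest: +SKIP
+ array([1., 1.])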
+ + See Also: + - :py:func:`scipy.stats.wasserstein_distance` + """ + + def wrapped_metric(X, Y): + return wasserstein_distance(u_values=X, v_values=Y, **kwargs) + + metric: np.ndarray = paired_distances( + X=_flatten_batch(y_true), Y=_flatten_batch(y_pred), metric=wrapped_metric + ) + return metric + + +def _flatten_batch(X: np.ndarray) -> np.ndarray: + """Flattens each of the matrices in a batch into a one-dimensional array. + + Args: + X: A batch of matrices. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of one-dimensional arrays. + """ + num_samples: int = X.shape[0] + num_matrix_elements: int = int(np.prod(X.shape[1:])) + return X.reshape((num_samples, num_matrix_elements)) + + +def _matrix_difference_l_norm(y_true, y_pred, order) -> np.ndarray: + """Calculates a batch of norms of the difference between two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + order: The order of the norm, see :py:func:`numpy.linalg.norm` for the full list + of norms that can be calculated. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of norms. + + See Also: + - :py:func:`numpy.linalg.norm` + """ + y_diff: np.ndarray = _flatten_batch(y_true - y_pred) + y_diff_l_norm: np.ndarray = np.linalg.norm(y_diff, axis=1, ord=order) + return y_diff_l_norm + + +def _normalize_batch(X: np.ndarray, order: int) -> np.ndarray: + """Normalizes a batch of matrices by their norms. + + Args: + X: A batch of matrices to be normalized. + order: The order of the norm used for normalization, see + :py:func:`numpy.linalg.norm` for the full list of available norms. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of normalized matrices. + + See Also: + - :py:func:`numpy.linalg.norm` + """ + X_l_norm: np.ndarray = np.linalg.norm(X, axis=1, ord=order) + num_samples: int = X_l_norm.shape[0] + normalized_batch: np.ndarray = X / X_l_norm.reshape((num_samples, 1)) + return normalized_batch + + +DISTANCE_METRICS_REGISTRY: Dict[str, Callable[..., Any]] = dict( + l_inf_norm=l_inf_norm, + l_1_norm=l_1_norm, + l_2_norm=l_2_norm, + paired_cosine_similarities=paired_cosine_similarities, + paired_euclidean_distances=paired_euclidean_distances, + paired_manhattan_distances=paired_manhattan_distances, + paired_wasserstein_distances=paired_wasserstein_distances, +) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_exceptions.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_exceptions.py new file mode 100644 index 000000000..fc88cc25c --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_exceptions.py @@ -0,0 +1,27 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module of exceptions for the metrics plugins collection."""
+
+from dioptra.sdk.exceptions.base import BaseTaskPluginError
+
+
+class UnknownDistanceMetricError(BaseTaskPluginError):
+ """The requested distance metric could not be located."""
+
+
+class UnknownPerformanceMetricError(BaseTaskPluginError):
+ """The requested performance metric could not be located."""
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/mlflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/mlflow.py
new file mode 100644
index 000000000..8546dff8c
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/mlflow.py
@@ -0,0 +1,103 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for using the MLFlow model registry."""
+
+from __future__ import annotations
+
+import os
+from pathlib import Path
+from typing import Optional
+
+import mlflow
+import structlog
+from mlflow.entities.model_registry import ModelVersion
+from mlflow.tracking import MlflowClient
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+
+from .artifacts_restapi import upload_model_to_restapi
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+@pyplugs.register
+def add_model_to_registry(name: str, model_dir: str) -> Optional[ModelVersion]:
+ """Registers a trained model logged during the current run to the MLFlow registry.
+
+ Args:
+ name: The registration name to use for the model.
+ model_dir: The relative artifact directory where MLFlow logged the model trained
+ during the current run.
+
+ Returns:
+ A :py:class:`~mlflow.entities.model_registry.ModelVersion` object created by the
+ backend, or `None` if `name` is empty.
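+
+ Examples:
+ An illustrative sketch; the registration name is an assumption::
+
+ >>> version = add_model_to_registry( # doctest: +SKIP
+ ... name="mnist_le_net", model_dir="model"
+ ... )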
+ """ + job_id = os.environ['__JOB_ID'] + if not name.strip(): + return None + + active_run = mlflow.active_run() + + run_id: str = active_run.info.run_id + artifact_uri: str = active_run.info.artifact_uri + source: str = f"{artifact_uri}/{model_dir}" + + registered_models = [x.name for x in MlflowClient().search_registered_models()] + + if name not in registered_models: + LOGGER.info("create registered model", name=name) + MlflowClient().create_registered_model(name=name) + + LOGGER.info("create model version", name=name, source=source, run_id=run_id) + model_version: ModelVersion = MlflowClient().create_model_version( + name=name, source=source, run_id=run_id + ) + upload_model_to_restapi(name, source, job_id) + + return model_version + + +@pyplugs.register +def get_experiment_name() -> str: + """Gets the name of the experiment for the current run. + + Args: + active_run: The :py:class:`mlflow.ActiveRun` object managing the current run's + state. + + Returns: + The name of the experiment. + """ + active_run = mlflow.active_run() + + experiment_name: str = ( + MlflowClient().get_experiment(active_run.info.experiment_id).name + ) + LOGGER.info( + "Obtained experiment name of active run", experiment_name=experiment_name + ) + + return experiment_name + + +@pyplugs.register +def prepend_cwd(path: str) -> Path: + ret = Path.cwd() / path + return ret + diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py new file mode 100644 index 000000000..e47cbd34b --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py @@ -0,0 +1,244 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+from __future__ import annotations
+
+from pathlib import Path
+from typing import Any, Dict, List, Optional, Tuple, Union
+
+import numpy as np
+import structlog
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+
+from .artifacts_mlflow import (
+ download_all_artifacts,
+ upload_data_frame_artifact,
+ upload_directory_as_tarball_artifact,
+)
+from .artifacts_restapi import (
+ get_uri_for_model,
+ get_uris_for_artifacts,
+ get_uris_for_job,
+)
+from .artifacts_utils import extract_tarfile, make_directories
+from .attacks_fgm import fgm
+from .backend_configs_tensorflow import init_tensorflow
+from .data_tensorflow import (
+ create_image_dataset,
+ get_n_classes_from_directory_iterator,
+)
+from .estimators_keras_classifiers import init_classifier
+from .estimators_methods import fit
+from .metrics_distance import get_distance_metric_list
+from .mlflow import add_model_to_registry
+from .random_rng import init_rng
+from .random_sample import draw_random_integer
+from .registry_art import load_wrapped_tensorflow_keras_classifier
+from .registry_mlflow import load_tensorflow_keras_classifier
+from .tensorflow import (
+ evaluate_metrics_tensorflow,
+ get_model_callbacks,
+ get_optimizer,
+ get_performance_metrics,
+)
+from .tracking_mlflow import (
+ log_metrics,
+ log_parameters,
+ log_tensorflow_keras_estimator,
+)
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+@pyplugs.register
+def load_dataset(
+ ep_seed: int = 10145783023,
+ data_dir: str = "/dioptra/data/Mnist/testing",
+ subsets: List[str] = ['testing'],
+ image_size: Tuple[int, int, int] = (28, 28, 1),
+ rescale: float = 1.0 / 255,
+ validation_split: Optional[float] = 0.2,
+ batch_size: int = 32,
+ label_mode: str = "categorical",
+ shuffle: bool = False,
+) -> Tuple[Any, Any, Any]:
+ """Seeds the RNGs and returns the (training, validation, testing) iterators.
+
+ Subsets that are not listed in `subsets` are returned as `None`.
+ """
+ seed, rng = init_rng(ep_seed)
+ global_seed = draw_random_integer(rng)
+ dataset_seed = draw_random_integer(rng)
+ init_tensorflow(global_seed)
+ log_parameters(
+ {'entry_point_seed': ep_seed,
+ 'tensorflow_global_seed': global_seed,
+ 'dataset_seed': dataset_seed})
+ training_dataset = None if "training" not in subsets else create_image_dataset(
+ data_dir=data_dir,
+ subset="training",
+ image_size=image_size,
+ seed=dataset_seed,
+ rescale=rescale,
+ validation_split=validation_split,
+ batch_size=batch_size,
+ label_mode=label_mode,
+ shuffle=shuffle,
+ )
+ validation_dataset = None if "validation" not in subsets else create_image_dataset(
+ data_dir=data_dir,
+ subset="validation",
+ image_size=image_size,
+ seed=dataset_seed,
+ rescale=rescale,
+ validation_split=validation_split,
+ batch_size=batch_size,
+ label_mode=label_mode,
+ shuffle=shuffle,
+ )
+ testing_dataset = None if "testing" not in subsets else create_image_dataset(
+ data_dir=data_dir,
+ subset=None,
+ image_size=image_size,
+ seed=dataset_seed,
+ rescale=rescale,
+ validation_split=validation_split,
+ batch_size=batch_size,
+ label_mode=label_mode,
+ shuffle=shuffle,
+ )
+ return training_dataset, validation_dataset, testing_dataset
+
+
+@pyplugs.register
+def create_model(
+ dataset: Optional[DirectoryIterator] = None,
+ model_architecture: str = "le_net",
+ input_shape: Tuple[int, int, int] = (28, 28, 1),
+ loss: str = "categorical_crossentropy",
+ learning_rate: float = 0.001,
+ optimizer: str = "Adam",
+ metrics_list: Optional[List[Dict[str, Any]]] = None,
+):
+ """Builds and compiles an untrained classifier for the given dataset."""
+ n_classes = get_n_classes_from_directory_iterator(dataset)
+ optim = get_optimizer(optimizer, learning_rate)
+ perf_metrics = get_performance_metrics(metrics_list)
+ classifier = init_classifier(
+ model_architecture, optim, perf_metrics, input_shape, n_classes, loss
+ )
+ return classifier
+
+
+@pyplugs.register
+def load_model(
+ model_name: str | None = None,
+ model_version: int | None = None,
+ imagenet_preprocessing: bool = False,
+ art: bool = False,
+ classifier_kwargs: Optional[Dict[str, Any]] = None,
+):
+ """Loads a registered model, optionally wrapped as an ART classifier."""
+ uri = get_uri_for_model(model_name, model_version)
+ if art:
+ classifier = load_wrapped_tensorflow_keras_classifier(
+ uri, imagenet_preprocessing, classifier_kwargs
+ )
+ else:
+ classifier = load_tensorflow_keras_classifier(uri)
+ return classifier
+
+
+@pyplugs.register
+def train(
+ estimator: Any,
+ x: Any = None,
+ y: Any = None,
+ callbacks_list: Optional[List[Dict[str, Any]]] = None,
+ fit_kwargs: Optional[Dict[str, Any]] = None,
+):
+ """Fits `estimator` on the given data in place and returns it."""
+ fit_kwargs = {} if fit_kwargs is None else fit_kwargs
+ callbacks = get_model_callbacks(callbacks_list)
+ fit_kwargs['callbacks'] = callbacks
+ # fit() trains the estimator in place; its return value (e.g. the Keras
+ # History object) is not needed here.
+ fit(estimator=estimator, x=x, y=y, fit_kwargs=fit_kwargs)
+ return estimator
+
+
+@pyplugs.register
+def save_artifacts_and_models(
+ artifacts: Optional[List[Dict[str, Any]]] = None,
+ models: Optional[List[Dict[str, Any]]] = None,
+):
+ """Logs models to the MLFlow registry and uploads tarball/dataframe artifacts."""
+ artifacts = [] if artifacts is None else artifacts
+ models = [] if models is None else models
+
+ for model in models:
+ log_tensorflow_keras_estimator(model['model'], "model")
+ add_model_to_registry(model['name'], "model")
+
+ for artifact in artifacts:
+ if artifact['type'] == 'tarball':
+ upload_directory_as_tarball_artifact(
+ source_dir=artifact['adv_data_dir'],
+ tarball_filename=artifact['adv_tar_name'],
+ )
+ elif artifact['type'] == 'dataframe':
+ upload_data_frame_artifact(
+ data_frame=artifact['data_frame'],
+ file_name=artifact['file_name'],
+ file_format=artifact['file_format'],
+ file_format_kwargs=artifact['file_format_kwargs'],
+ )
+
+
+@pyplugs.register
+def load_artifacts_for_job(
+ job_id: str, extract_files: Optional[List[str | Path]] = None
+):
+ """Downloads all artifacts for a job and extracts any requested tarballs."""
+ extract_files = [] if extract_files is None else extract_files
+ uris = get_uris_for_job(job_id)
+ paths = download_all_artifacts(uris, extract_files)
+ for extract in paths:
+ extract_tarfile(extract)
+
+
+@pyplugs.register
+def load_artifacts(
+ artifact_ids: Optional[List[int]] = None,
+ extract_files: Optional[List[str | Path]] = None,
+):
+ """Downloads the given artifacts and extracts any requested tarballs."""
+ extract_files = [] if extract_files is None else extract_files
+ artifact_ids = [] if artifact_ids is None else artifact_ids
+ uris = get_uris_for_artifacts(artifact_ids)
+ paths = download_all_artifacts(uris, extract_files)
+ for extract in paths:
+ extract_tarfile(extract)
+
+
+@pyplugs.register
+def attack(
+ dataset: Any,
+ data_dir: str,
+ adv_data_dir: Union[str, Path],
+ classifier: Any,
+ image_size: Tuple[int, int, int],
+ distance_metrics: List[Dict[str, str]],
+ rescale: float = 1.0 / 255,
+ batch_size: int = 32,
+ label_mode: str = "categorical",
+ eps: float = 0.3,
+ eps_step: float = 0.1,
+ minimal: bool = False,
+ norm: Union[int, float, str] = np.inf,
+):
+ """Runs the FGM attack over `dataset` and returns the distance metrics frame."""
+ make_directories(adv_data_dir)
+ distance_metrics_list = get_distance_metric_list(distance_metrics)
+ fgm_dataset = fgm(
+ data_flow=dataset,
+ data_dir=data_dir,
+ adv_data_dir=adv_data_dir,
+ keras_classifier=classifier,
+ image_size=image_size,
+ distance_metrics_list=distance_metrics_list,
+ rescale=rescale,
+ batch_size=batch_size,
+ label_mode=label_mode,
+ eps=eps,
+ eps_step=eps_step,
+ minimal=minimal,
+ norm=norm,
+ )
+ return fgm_dataset
+
+
+@pyplugs.register
+def compute_metrics(
+ classifier: Any,
+ dataset: Any,
+):
+ """Evaluates the classifier on `dataset` and logs the resulting metrics."""
+ metrics = 
evaluate_metrics_tensorflow(classifier, dataset) + log_metrics(metrics) + +@pyplugs.register +def augment_data(): + pass + +@pyplugs.register +def predict(): + pass diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_rng.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_rng.py new file mode 100644 index 000000000..d10b2bd60 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_rng.py @@ -0,0 +1,56 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for managing random number generators.""" + +from __future__ import annotations + +from typing import Tuple + +import numpy as np +import structlog +from numpy.random._generator import Generator as RNGenerator +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +@pyplugs.task_nout(2) +def init_rng(seed: int = -1) -> Tuple[int, RNGenerator]: + """Constructs a new random number generator. + + Args: + seed: A seed to initialize the random number generator. If the value is less + than zero, then the seed is generated by pulling fresh, unpredictable + entropy from the OS. The default is `-1`. + + Returns: + A tuple containing the seed and the initialized random number generator. If a + `seed < 0` was passed as an argument, then the seed generated by the OS will be + returned. + + See Also: + - :py:func:`numpy.random.default_rng` + """ + rng = np.random.default_rng(seed if seed >= 0 else None) + + if seed < 0: + seed = rng.bit_generator._seed_seq.entropy # type: ignore[attr-defined] + + return int(seed), rng diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_sample.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_sample.py new file mode 100644 index 000000000..33c13d5d5 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/random_sample.py @@ -0,0 +1,89 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. 
Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for drawing random samples.""" + +from __future__ import annotations + +from typing import Optional, Tuple, Union + +import numpy as np +import structlog +from numpy.random._generator import Generator as RNGenerator +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +def draw_random_integer(rng: RNGenerator, low: int = 0, high: int = 2**31 - 1) -> int: + """Returns a random integer from `low` (inclusive) to `high` (exclusive). + + The integer is sampled from a uniform distribution. + + Args: + rng: A random number generator returned by :py:func:`~.rng.init_rng`. + low: Lowest (signed) integers to be drawn from the distribution (unless + `high=None`, in which case this parameter is `0` and this value is used for + `high`). + high: If not `None`, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if `high=None`) + + Returns: + A random integer. + + See Also: + - :py:meth:`numpy.random.Generator.integers` + """ + result: int = int(rng.integers(low=low, high=high)) + + return result + + +@pyplugs.register +def draw_random_integers( + rng: RNGenerator, + low: int = 0, + high: int = 2**31 - 1, + size: Optional[Union[int, Tuple[int, ...]]] = None, +) -> np.ndarray: + """Returns random integers from `low` (inclusive) to `high` (exclusive). + + The integers are sampled from a uniform distribution. + + Args: + rng: A random number generator returned by :py:func:`~.rng.init_rng`. + low: Lowest (signed) integers to be drawn from the distribution (unless + `high=None`, in which case this parameter is `0` and this value is used for + `high`). + high: If not `None`, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if `high=None`). + size: The output shape of array. If the given shape is, e.g., `(m, n, k)`, then + `m * n * k` samples are drawn. If `None`, a single value is returned. The + default is `None`. + + Returns: + A `size`-shaped array of random integers. + + See Also: + - :py:meth:`numpy.random.Generator.integers` + """ + size = size or 1 + result: np.ndarray = rng.integers(low=low, high=high, size=size) + + return result diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py new file mode 100644 index 000000000..7286cf002 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py @@ -0,0 +1,107 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. 
This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for interfacing the |ART| with the MLFlow model registry.

+.. |ART| replace:: `Adversarial Robustness Toolbox\
+ <https://github.com/Trusted-AI/adversarial-robustness-toolbox>`__
+"""
+
+from __future__ import annotations
+
+from typing import Any, Dict, Optional
+
+import numpy as np
+import structlog
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError
+from dioptra.sdk.utilities.decorators import require_package
+
+from .registry_mlflow import load_tensorflow_keras_classifier
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+try:
+ from art.estimators.classification import TensorFlowV2Classifier
+
+except ImportError: # pragma: nocover
+ LOGGER.warn(
+ "Unable to import one or more optional packages, functionality may be reduced",
+ package="art",
+ )
+
+
+try:
+ from tensorflow.keras import losses
+ from tensorflow.keras.models import Sequential
+
+except ImportError: # pragma: nocover
+ LOGGER.warn(
+ "Unable to import one or more optional packages, functionality may be reduced",
+ package="tensorflow",
+ )
+
+
+@pyplugs.register
+@require_package("art", exc_type=ARTDependencyError)
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def load_wrapped_tensorflow_keras_classifier(
+ artifact_uri: str,
+ imagenet_preprocessing: bool = False,
+ classifier_kwargs: Optional[Dict[str, Any]] = None,
+) -> TensorFlowV2Classifier:
+ """Loads and wraps a registered Keras classifier for compatibility with the |ART|.
+
+ Args:
+ artifact_uri: The MLFlow URI of the registered model to load.
+ imagenet_preprocessing: If `True`, configure the wrapper with the ImageNet
+ channel-mean preprocessing used by ImageNet-pretrained models. The
+ default is `False`.
+ classifier_kwargs: A dictionary mapping argument names to values which will
+ be passed to the TensorFlowV2Classifier constructor.
+
+ Returns:
+ A trained :py:class:`~art.estimators.classification.TensorFlowV2Classifier`
+ object.
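+
+ Examples:
+ An illustrative sketch; the model URI and keyword argument are assumptions::
+
+ >>> classifier = load_wrapped_tensorflow_keras_classifier( # doctest: +SKIP
+ ... artifact_uri="models:/mnist_le_net/1",
+ ... classifier_kwargs={"clip_values": (0.0, 1.0)},
+ ... )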
+ + See Also: + - :py:class:`art.estimators.classification.TensorFlowV2Classifier` + - :py:func:`.mlflow.load_tensorflow_keras_classifier` + """ + classifier_kwargs = classifier_kwargs or {} + keras_classifier: Sequential = load_tensorflow_keras_classifier( + uri=artifact_uri + ) + nb_classes = keras_classifier.output_shape[1] + input_shape = keras_classifier.input_shape + loss_object = losses.get(keras_classifier.loss) + preprocessing = ( + (np.array([103.939, 116.779, 123.680]), np.array([1.0, 1.0, 1.0])) + if imagenet_preprocessing + else None + ) + wrapped_keras_classifier: TensorFlowV2Classifier = TensorFlowV2Classifier( + model=keras_classifier, + nb_classes=nb_classes, + input_shape=input_shape, + loss_object=loss_object, + preprocessing=preprocessing, + **classifier_kwargs, + ) + LOGGER.info( + "Wrap Keras classifier for compatibility with Adversarial Robustness Toolbox" + ) + + return wrapped_keras_classifier diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_mlflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_mlflow.py new file mode 100644 index 000000000..23d8519aa --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_mlflow.py @@ -0,0 +1,120 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for using the MLFlow model registry.""" + +from __future__ import annotations + +from typing import Optional + +import mlflow +import structlog +from mlflow.entities import Run as MlflowRun +from mlflow.entities.model_registry import ModelVersion +from mlflow.tracking import MlflowClient +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.models import Sequential + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +def add_model_to_registry( + active_run: MlflowRun, name: str, model_dir: str +) -> Optional[ModelVersion]: + """Registers a trained model logged during the current run to the MLFlow registry. + + Args: + active_run: The :py:class:`mlflow.ActiveRun` object managing the current run's + state. + name: The registration name to use for the model. 
+        model_dir: The relative artifact directory where MLFlow logged the model
+            trained during the current run.
+
+    Returns:
+        A :py:class:`~mlflow.entities.model_registry.ModelVersion` object created by
+        the backend.
+    """
+    if not name.strip():
+        return None
+
+    run_id: str = active_run.info.run_id
+    artifact_uri: str = active_run.info.artifact_uri
+    source: str = f"{artifact_uri}/{model_dir}"
+
+    registered_models = [x.name for x in MlflowClient().search_registered_models()]
+
+    if name not in registered_models:
+        LOGGER.info("create registered model", name=name)
+        MlflowClient().create_registered_model(name=name)
+
+    LOGGER.info("create model version", name=name, source=source, run_id=run_id)
+    model_version: ModelVersion = MlflowClient().create_model_version(
+        name=name, source=source, run_id=run_id
+    )
+
+    return model_version
+
+
+@pyplugs.register
+def get_experiment_name(active_run: MlflowRun) -> str:
+    """Gets the name of the experiment for the current run.
+
+    Args:
+        active_run: The :py:class:`mlflow.ActiveRun` object managing the current run's
+            state.
+
+    Returns:
+        The name of the experiment.
+    """
+    experiment_name: str = (
+        MlflowClient().get_experiment(active_run.info.experiment_id).name
+    )
+    LOGGER.info(
+        "Obtained experiment name of active run", experiment_name=experiment_name
+    )
+
+    return experiment_name
+
+
+@pyplugs.register
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def load_tensorflow_keras_classifier(uri: str) -> Sequential:
+    """Loads a registered Keras classifier.
+
+    Args:
+        uri: The MLFlow artifact URI of the registered model to load.
+
+    Returns:
+        A trained :py:class:`tf.keras.Sequential` object.
+    """
+    LOGGER.info("Load Keras classifier from model registry", uri=uri)
+
+    return mlflow.keras.load_model(model_uri=uri)
\ No newline at end of file
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py
new file mode 100644
index 000000000..52dfeea26
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py
@@ -0,0 +1,84 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
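A rough usage sketch for the registry helpers above, assuming an MLFlow tracking server is configured and a Keras model was logged under the "model" artifact directory earlier in the run; the model name is illustrative:

    import mlflow

    with mlflow.start_run() as active_run:
        # ... train and log a Keras model under the "model" directory ...
        model_version = add_model_to_registry(
            active_run=active_run, name="mnist_le_net", model_dir="model"
        )
        # Returns None when name is blank, otherwise the new registry version.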
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +from types import FunctionType +from typing import Any, Dict, List, Union + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +from . import import_keras + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.callbacks import Callback + from tensorflow.keras.metrics import Metric + from tensorflow.keras.optimizers import Optimizer + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def evaluate_metrics_tensorflow(classifier, dataset) -> Dict[str, float]: + result = classifier.evaluate(dataset, verbose=0, return_dict=True) + return result + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_optimizer(optimizer: str, learning_rate: float) -> Optimizer: + return import_keras.get_optimizer(optimizer)(learning_rate) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_model_callbacks(callbacks_list: List[Dict[str, Any]]) -> List[Callback]: + return [ + import_keras.get_callback(callback["name"])(**callback.get("parameters", {})) + for callback in callbacks_list + ] + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_performance_metrics( + metrics_list: List[Dict[str, Any]] +) -> List[Union[Metric, FunctionType]]: + performance_metrics: List[Metric] = [] + + for metric in metrics_list: + new_metric: Union[Metric, FunctionType] = import_keras.get_metric( + metric["name"] + ) + performance_metrics.append( + new_metric(**metric.get("parameters")) + if not isinstance(new_metric, FunctionType) and metric.get("parameters") + else new_metric + ) + + return performance_metrics diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tracking_mlflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tracking_mlflow.py new file mode 100644 index 000000000..f7f878cd5 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tracking_mlflow.py @@ -0,0 +1,99 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
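The callback and metric factories in tensorflow.py above consume lists of name/parameters mappings resolved through import_keras; a plausible input shape, where the names are standard tf.keras identifiers and the parameter values are illustrative:

    callbacks_list = [
        {"name": "EarlyStopping", "parameters": {"monitor": "val_loss", "patience": 5}},
        {"name": "TerminateOnNaN"},  # entries without parameters are allowed
    ]
    metrics_list = [
        {"name": "CategoricalAccuracy", "parameters": {"name": "accuracy"}},
    ]
    callbacks = get_model_callbacks(callbacks_list)
    metrics = get_performance_metrics(metrics_list)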
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for using the MLFlow Tracking service.""" + +from __future__ import annotations + +from typing import Dict + +import mlflow +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.models import Sequential + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +def log_metrics(metrics: Dict[str, float]) -> None: + """Logs metrics to the MLFlow Tracking service for the current run. + + Args: + metrics: A dictionary with the metrics to be logged. The keys are the metric + names and the values are the metric values. + + See Also: + - :py:func:`mlflow.log_metric` + """ + for metric_name, metric_value in metrics.items(): + mlflow.log_metric(key=metric_name, value=metric_value) + LOGGER.info( + "Log metric to MLFlow Tracking server", + metric_name=metric_name, + metric_value=metric_value, + ) + + +@pyplugs.register +def log_parameters(parameters: Dict[str, float]) -> None: + """Logs parameters to the MLFlow Tracking service for the current run. + + Parameters can only be set once per run. + + Args: + parameters: A dictionary with the parameters to be logged. The keys are the + parameter names and the values are the parameter values. + + See Also: + - :py:func:`mlflow.log_param` + """ + for parameter_name, parameter_value in parameters.items(): + mlflow.log_param(key=parameter_name, value=parameter_value) + LOGGER.info( + "Log parameter to MLFlow Tracking server", + parameter_name=parameter_name, + parameter_value=parameter_value, + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def log_tensorflow_keras_estimator(estimator: Sequential, model_dir: str) -> None: + """Logs a Keras estimator trained during the current run to the MLFlow registry. + + Args: + estimator: A trained Keras estimator. + model_dir: The relative artifact directory where MLFlow should save the + model. + """ + mlflow.keras.log_model(model=estimator, artifact_path=model_dir) + LOGGER.info( + "Tensorflow Keras model logged to tracking server", + model_dir=model_dir, + ) diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py b/examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py new file mode 100644 index 000000000..57d002ce1 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py @@ -0,0 +1,23 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. 
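The tracking helpers above are thin wrappers over MLFlow's logging primitives; a minimal sketch of an entry point calling them inside a run, with illustrative values:

    import mlflow

    with mlflow.start_run():
        log_parameters({"learning_rate": 0.001, "batch_size": 32})
        log_metrics({"accuracy": 0.982, "loss": 0.061})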
However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module of exceptions for the artifacts plugins collection."""
+
+from dioptra.sdk.exceptions.base import BaseTaskPluginError
+
+
+class UnsupportedDataFrameFileFormatError(BaseTaskPluginError):
+    """The requested data frame file format is not supported."""
diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py b/examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py
new file mode 100644
index 000000000..e2a77af4a
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py
@@ -0,0 +1,241 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for MLFlow artifacts management.
+
+This module contains a set of task plugins for managing artifacts generated during an
+entry point run.
+"""
+
+import os
+import tarfile
+from pathlib import Path
+from typing import Any, Callable, Dict, Optional, Union
+
+import mlflow
+import pandas as pd
+import structlog
+from mlflow.tracking import MlflowClient
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+from dioptra.sdk.utilities.paths import set_path_ext
+
+from .artifacts_restapi import get_artifacts_for_job, upload_artifact_to_restapi
+from .artifacts_exceptions import UnsupportedDataFrameFileFormatError
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+@pyplugs.register
+def download_all_artifacts_for_job(
+    job_id: str, artifact_path: str, destination_path: Optional[str] = None
+) -> str:
+    """Downloads an artifact file or directory from a previous MLFlow run.
+
+    Args:
+        job_id: The unique identifier of the job whose artifacts should be searched.
+        artifact_path: The relative source path to the desired artifact.
+        destination_path: The relative destination path where the artifacts will be
+            downloaded. If `None`, the artifacts will be downloaded to a new
+            uniquely-named directory on the local filesystem. The default is `None`.
+
+    Returns:
+        A string pointing to the directory containing the downloaded artifacts.
+
+    See Also:
+        - :py:meth:`mlflow.tracking.MlflowClient.download_artifacts`
+    """
+    uris = get_artifacts_for_job(job_id)
+    for uri in uris:
+        if uri.endswith(artifact_path):
+            download_path: str = mlflow.artifacts.download_artifacts(
+                artifact_uri=uri, dst_path=destination_path
+            )
+            LOGGER.info(
+                "Artifact downloaded from MLFlow run",
+                job_id=job_id,
+                artifact_path=artifact_path,
+                destination_path=download_path,
+            )
+            return download_path
+
+    raise FileNotFoundError(
+        f"No artifact ending in {artifact_path!r} is registered for job {job_id}"
+    )
+
+
+@pyplugs.register
+def upload_data_frame_artifact(
+    data_frame: pd.DataFrame,
+    file_name: str,
+    file_format: str,
+    file_format_kwargs: Optional[Dict[str, Any]] = None,
+    working_dir: Optional[Union[str, Path]] = None,
+) -> None:
+    """Uploads a :py:class:`~pandas.DataFrame` as an artifact of the active MLFlow run.
+
+    The `file_format` argument selects the :py:class:`~pandas.DataFrame` serializer,
+    all of which are handled using the object's `DataFrame.to_{format}` methods. The
+    string passed to `file_format` must match one of the following:
+
+    - `csv[.bz2|.gz|.xz]` - A comma-separated values plain text file with optional
+      compression.
+    - `feather` - A binary feather file.
+    - `json` - A plain text JSON file.
+    - `pickle` - A binary pickle file.
+
+    Args:
+        data_frame: A :py:class:`~pandas.DataFrame` to be uploaded.
+        file_name: The filename to use for the serialized :py:class:`~pandas.DataFrame`.
+        file_format: The :py:class:`~pandas.DataFrame` file serialization format.
+        file_format_kwargs: A dictionary of additional keyword arguments to pass to the
+            serializer. If `None`, then no additional keyword arguments are passed. The
+            default is `None`.
+        working_dir: The location where the file should be saved. If `None`, then the
+            current working directory is used. The default is `None`.
+
+    Notes:
+        The :py:mod:`pyarrow` package must be installed in order to serialize to the
+        feather format.
+ + See Also: + - :py:meth:`pandas.DataFrame.to_csv` + - :py:meth:`pandas.DataFrame.to_feather` + - :py:meth:`pandas.DataFrame.to_json` + - :py:meth:`pandas.DataFrame.to_pickle` + """ + + def to_format( + data_frame: pd.DataFrame, format: str, output_dir: Union[str, Path] + ) -> Dict[str, Any]: + filepath: Path = Path(output_dir) / Path(file_name).name + format_funcs = { + "csv": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv"), + }, + "csv.bz2": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv.bz2"), + }, + "csv.gz": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv.gz"), + }, + "csv.xz": { + "func": data_frame.to_csv, + "filepath": set_path_ext(filepath=filepath, ext="csv.xz"), + }, + "feather": { + "func": data_frame.to_feather, + "filepath": set_path_ext(filepath=filepath, ext="feather"), + }, + "json": { + "func": data_frame.to_json, + "filepath": set_path_ext(filepath=filepath, ext="json"), + }, + "pickle": { + "func": data_frame.to_pickle, + "filepath": set_path_ext(filepath=filepath, ext="pkl"), + }, + } + + func: Optional[Dict[str, Any]] = format_funcs.get(format) + + if func is None: + raise UnsupportedDataFrameFileFormatError( + f"Serializing data frames to the {file_format} format is not supported" + ) + + return func + + if file_format_kwargs is None: + file_format_kwargs = {} + + if working_dir is None: + working_dir = Path.cwd() + + working_dir = Path(working_dir) + format_dict: Dict[str, Any] = to_format( + data_frame=data_frame, format=file_format, output_dir=working_dir + ) + + df_to_format_func: Callable[..., None] = format_dict["func"] + df_artifact_path: Path = format_dict["filepath"] + + df_to_format_func(df_artifact_path, **file_format_kwargs) + LOGGER.info( + "Data frame saved to file", + file_name=df_artifact_path.name, + file_format=file_format, + ) + + upload_file_as_artifact(artifact_path=df_artifact_path) + + +@pyplugs.register +def upload_directory_as_tarball_artifact( + source_dir: Union[str, Path], + tarball_filename: str, + tarball_write_mode: str = "w:gz", + working_dir: Optional[Union[str, Path]] = None, +) -> None: + """Archives a directory and uploads it as an artifact of the active MLFlow run. + + Args: + source_dir: The directory which should be uploaded. + tarball_filename: The filename to use for the archived directory tarball. + tarball_write_mode: The write mode for the tarball, see :py:func:`tarfile.open` + for the full list of compression options. The default is `"w:gz"` (gzip + compression). + working_dir: The location where the file should be saved. If `None`, then the + current working directory is used. The default is `None`. + + See Also: + - :py:func:`tarfile.open` + """ + if working_dir is None: + working_dir = Path.cwd() + + source_dir = Path(source_dir) + working_dir = Path(working_dir) + tarball_path = working_dir / tarball_filename + + with tarfile.open(tarball_path, tarball_write_mode) as f: + f.add(source_dir, arcname=source_dir.name) + + LOGGER.info( + "Directory added to tar archive", + directory=source_dir, + tarball_path=tarball_path, + ) + + upload_file_as_artifact(artifact_path=tarball_path) + + +@pyplugs.register +def upload_file_as_artifact(artifact_path: Union[str, Path]) -> None: + """Uploads a file as an artifact of the active MLFlow run. + + Args: + artifact_path: The location of the file to be uploaded. 
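For context, a sketch of how the upload helpers in this module compose; it assumes an active MLFlow run and a `__JOB_ID` environment variable set by the worker, and the file names are illustrative:

    import pandas as pd

    df = pd.DataFrame({"image": ["0001.png"], "l_infinity_norm": [0.25]})
    upload_data_frame_artifact(
        data_frame=df, file_name="distance_metrics", file_format="csv.gz"
    )
    upload_directory_as_tarball_artifact(
        source_dir="adv_testing", tarball_filename="testing_adversarial_fgm.tar.gz"
    )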
+
+    See Also:
+        - :py:func:`mlflow.log_artifact`
+    """
+    artifact_path = Path(artifact_path)
+    mlflow.log_artifact(str(artifact_path))
+    uri = mlflow.get_artifact_uri(str(artifact_path.name))
+    upload_artifact_to_restapi(uri, os.environ["__JOB_ID"])
+    LOGGER.info(
+        "Artifact uploaded for current MLFlow run", filename=artifact_path.name
+    )
diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py b/examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py
new file mode 100644
index 000000000..9847e45ed
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py
@@ -0,0 +1,151 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+import os
+from posixpath import join as urljoin
+
+import requests
+import structlog
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+@pyplugs.register
+def get_uri_for_artifact(job_id, index=0):
+    session, url = get_logged_in_session()
+    job = get(session, url, "jobs", str(job_id))
+    uri = job["artifacts"][index]["artifactUri"]
+    return uri
+
+
+def get_artifacts_for_job(job_id):
+    session, url = get_logged_in_session()
+    job = get(session, url, "jobs", str(job_id))
+    return [artifact["artifactUri"] for artifact in job["artifacts"]]
+
+
+def get_logged_in_session():
+    session = requests.Session()
+    url = "http://dioptra-deployment-restapi:5000/api/v1"
+
+    login = post(
+        session,
+        url,
+        {"username": "pluginuser", "password": "pleasemakesuretoPLUGINthecomputer"},
+        "auth",
+        "login",
+    )
+    LOGGER.info("login request sent", response=str(login))
+
+    return session, url
+
+
+def upload_model_to_restapi(name, source_uri, job_id):
+    version = 0
+    model_id = 0
+
+    session, url = get_logged_in_session()
+
+    models = get(session, url, f"models?search={name}&pageLength=500")
+    LOGGER.info("requesting models from RESTAPI", response=models)
+
+    for model in models["data"]:
+        # Reuse an existing model entry; bump the version if one is registered.
+        if model["name"] == name:
+            model_id = model["id"]
+            if model["latestVersion"] is not None:
+                version = model["latestVersion"]["versionNumber"] + 1
+
+    if version == 0 and model_id == 0:
+        LOGGER.info("creating new model on RESTAPI")
+        model = post(
+            session,
+            url,
+            {"group": 1, "name": name, "description": f"{name} model"},
+            "models",
+        )
+        model_id = model["id"]
+        LOGGER.info("new model created", response=model)
+
+    artifact = post(
+        session,
+        url,
+        {
+            "group": 1,
+            "description": f"{name} model artifact",
+            "job": str(job_id),
+            "uri": source_uri,
+        },
+        "artifacts",
+    )
+    LOGGER.info("artifact", response=artifact)
+    model_version = post(
+        session,
+        url,
+        {"description": f"{name} model version", "artifact": artifact["id"]},
+        "models",
+        str(model_id),
+        "versions",
+    )
+    LOGGER.info("model created", response=model_version)
+
+
+def upload_artifact_to_restapi(source_uri, job_id):
+    session, url = get_logged_in_session()
+
+    artifact = post(
+        session,
+        url,
+        {
+            "group": 1,
+            "description": f"artifact for job {job_id}",
+            "job": str(job_id),
+            "uri": source_uri,
+        },
+        "artifacts",
+    )
+    LOGGER.info("artifact", response=artifact)
+
+
+def debug_request(url, method, data=None):
+    LOGGER.debug("Request made.", url=url, method=method, data=data)
+
+
+def debug_response(json):
+    LOGGER.debug("Response received.", json=json)
+
+
+def get(session, endpoint, *features):
+    debug_request(urljoin(endpoint, *features), "GET")
+    return make_request(session, "get", endpoint, None, *features)
+
+
+def post(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "POST", data)
+    return make_request(session, "post", endpoint, data, *features)
+
+
+def delete(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "DELETE", data)
+    return make_request(session, "delete", endpoint, data, *features)
+
+
+def put(session, endpoint, data, *features):
+    debug_request(urljoin(endpoint, *features), "PUT", data)
+    return make_request(session, "put", endpoint, data, *features)
+
+
+def make_request(session, method_name, endpoint, data, *features):
+    url = urljoin(endpoint, *features)
+    method = getattr(session, method_name)
+    response = None
+    try:
+        if data:
+            response = method(url, json=data)
+        else:
+            response = method(url)
+        if response.status_code != 200:
+            raise StatusCodeError()
+        json = response.json()
+    except (requests.ConnectionError, StatusCodeError, requests.JSONDecodeError) as e:
+        handle_error(session, url, method_name.upper(), data, response, e)
+    debug_response(json=json)
+    return json
+
+
+def handle_error(session, url, method, data, response, error):
+    # response is None when the request never completed (connection errors).
+    response_text = response.text if response is not None else None
+    if type(error) is requests.ConnectionError:
+        restapi = os.environ["DIOPTRA_RESTAPI_URI"]
+        message = (
+            f"Could not connect to the REST API. Is the server running at {restapi}?"
+        )
+        LOGGER.error(message, url=url, method=method, data=data, response=response_text)
+        raise APIConnectionError(message)
+    if type(error) is StatusCodeError:
+        message = f"Error code {response.status_code} returned."
+        LOGGER.error(message, url=url, method=method, data=data, response=response_text)
+        raise StatusCodeError(message)
+    if type(error) is requests.JSONDecodeError:
+        message = "JSON response could not be decoded."
+        LOGGER.error(message, url=url, method=method, data=data, response=response_text)
+        raise JSONDecodeError(message)
+
+
+class APIConnectionError(Exception):
+    """Class for connection errors"""
+
+
+class StatusCodeError(Exception):
+    """Class for status code errors"""
+
+
+class JSONDecodeError(Exception):
+    """Class for JSON decode errors"""
diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_utils.py b/examples/task-plugins/dioptra_custom/vc/artifacts_utils.py
new file mode 100644
index 000000000..37404c7ae
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/vc/artifacts_utils.py
@@ -0,0 +1,117 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce.
This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module containing generic utilities for managing artifacts.""" + +from __future__ import annotations + +import os +import tarfile +import uuid +from pathlib import Path +from tarfile import TarFile +from typing import Any, List, Union + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +def is_within_directory(directory: Union[str, Path], target: Union[str, Path]) -> bool: + abs_directory = os.path.abspath(directory) + abs_target = os.path.abspath(target) + + prefix = os.path.commonprefix([abs_directory, abs_target]) + + return prefix == abs_directory + + +def safe_extract(tar: TarFile, path: Union[str, Path] = ".") -> None: + for member in tar.getmembers(): + member_path = os.path.join(path, member.name) + if not is_within_directory(path, member_path): + raise Exception("Attempted Path Traversal in Tar File") + + tar.extractall(path, members=None, numeric_owner=False) + + +@pyplugs.register +def extract_tarfile( + filepath: Union[str, Path], + tarball_read_mode: str = "r:gz", + output_dir: Any = None, +) -> None: + """Extracts a tarball archive into the current working directory. + + Args: + filepath: The location of the tarball archive file provided as a string or a + :py:class:`~pathlib.Path` object. + tarball_read_mode: The read mode for the tarball, see :py:func:`tarfile.open` + for the full list of compression options. The default is `"r:gz"` (gzip + compression). + + See Also: + - :py:func:`tarfile.open` + """ + output_dir = Path(output_dir) if output_dir is not None else Path.cwd() + + filepath = Path(filepath) + with tarfile.open(filepath, tarball_read_mode) as f: + safe_extract(f, path=output_dir) + + +@pyplugs.register +def make_directories(dirs: List[Union[str, Path]]) -> None: + """Creates directories if they do not exist. + + Args: + dirs: A list of directories provided as strings or :py:class:`~pathlib.Path` + objects. + """ + for d in dirs: + d = Path(d) + d.mkdir(parents=True, exist_ok=True) + LOGGER.info("Directory created", directory=d) + + +@pyplugs.register +def extract_tarfile_in_unique_subdir( + filepath: Union[str, Path], + tarball_read_mode: str = "r:gz", +) -> Path: + """Extracts a tarball archive into a unique subdirectory of the + current working directory. + + Args: + filepath: The location of the tarball archive file provided as a string or a + :py:class:`~pathlib.Path` object. + tarball_read_mode: The read mode for the tarball, see :py:func:`tarfile.open` + for the full list of compression options. 
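The traversal guard above rejects archive members whose resolved path escapes the extraction root; a quick illustration of the check in isolation, with illustrative paths:

    assert is_within_directory("/tmp/extract", "/tmp/extract/model/weights.h5")
    assert not is_within_directory("/tmp/extract", "/tmp/extract/../etc/passwd")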
The default is `"r:gz"` (gzip + compression). + + See Also: + - :py:func:`tarfile.open` + """ + output_dir = Path(uuid.uuid4().hex) + output_dir.mkdir(mode=0o700, parents=True, exist_ok=True) + + filepath = Path(filepath) + with tarfile.open(filepath, tarball_read_mode) as f: + safe_extract(f, path=output_dir) + return output_dir diff --git a/examples/task-plugins/dioptra_custom/vc/attacks_fgm.py b/examples/task-plugins/dioptra_custom/vc/attacks_fgm.py new file mode 100644 index 000000000..b11282ea7 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/attacks_fgm.py @@ -0,0 +1,305 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for the Fast Gradient Method evasion attack. + +The Fast Gradient Method (FGM) [goodfellow2015]_ is an evasion attack that attempts to +fool a trained classifier by perturbing a test image using the gradient of the +classifier's neural network. This task plugin uses the Adversarial Robustness Toolbox's +[art2019]_ implementation of the |fgm_art|. + +References: + .. [art2019] M.-I. Nicolae et al., "Adversarial Robustness Toolbox v1.0.0," + Nov. 2019. [Online]. Available: + `arXiv:1807.01069v4 [cs.LG] `_. + + .. [goodfellow2015] I. Goodfellow, J. Shlens, and C. Szegedy. (May 2015). + Explaining and Harnessing Adversarial Examples, Presented at the Int. Conf. + on Learn. Represent. 2015, San Diego, California, United States. [Online]. + Available: `arXiv:1412.6572v3 [stat.ML] `_. + +.. 
|fgm_art| replace:: `Fast Gradient Method `__ +""" + +from __future__ import annotations + +from pathlib import Path +from typing import Callable, Dict, List, Optional, Tuple, Union + +import mlflow +import numpy as np +import pandas as pd +import scipy.stats +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from art.attacks.evasion import FastGradientMethod + from art.estimators.classification import TensorFlowV2Classifier + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="art", + ) + + +try: + from tensorflow.keras.preprocessing.image import ImageDataGenerator, save_img + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("art", exc_type=ARTDependencyError) +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def create_adversarial_fgm_dataset( + data_dir: str, + adv_data_dir: Union[str, Path], + keras_classifier: TensorFlowV2Classifier, + image_size: Tuple[int, int, int], + distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, + rescale: float = 1.0 / 255, + batch_size: int = 32, + label_mode: str = "categorical", + eps: float = 0.3, + eps_step: float = 0.1, + minimal: bool = False, + norm: Union[int, float, str] = np.inf, +) -> pd.DataFrame: + """Generates an adversarial dataset using the Fast Gradient Method attack. + + Each generated adversarial image is saved as an image file in the directory + specified by `adv_data_dir` and the distance metric functions passed to + `distance_metrics_list` are used to quantify the size of the perturbation applied to + each image. + + Args: + data_dir: The directory containing the clean test images. + adv_data_dir: The directory to use when saving the generated adversarial images. + keras_classifier: A trained :py:class:`~art.estimators.classification\\ + .TensorFlowV2Classifier`. + image_size: A tuple of integers `(height, width, channels)` used to preprocess + the images so that they all have the same dimensions and number of color + channels. `channels=3` means RGB color images and `channels=1` means + grayscale images. Images with different dimensions will be resized. If + `channels=1`, color images will be converted into grayscale. + distance_metrics_list: A list of distance metrics to compute after generating an + adversarial image. If `None`, then no distance metrics will be calculated. + The default is `None`. + rescale: The rescaling factor for the pixel vectors. If `None` or `0`, no + rescaling is applied, otherwise multiply the data by the value provided + (after applying all other transformations). The default is `1.0 / 255`. + batch_size: The size of the batch on which adversarial samples are generated. + The default is `32`. + label_mode: Determines how the label arrays for the dataset will be returned. + The available choices are: `"categorical"`, `"binary"`, `"sparse"`, + `"input"`, `None`. For information on the meaning of each choice, see + the documentation for |flow_from_directory|. The default is `"categorical"`. + eps: The attack step size. The default is `0.3`. 
+ eps_step: The step size of the input variation for minimal perturbation + computation. The default is `0.1`. + minimal: If `True`, compute the minimal perturbation, and use `eps_step` for the + step size and `eps` for the maximum perturbation. The default is `False`. + norm: The norm of the adversarial perturbation. Can be `"inf"`, + :py:data:`numpy.inf`, `1`, or `2`. The default is :py:data:`numpy.inf`. + + Returns: + A :py:class:`~pandas.DataFrame` containing the full distribution of the + calculated distance metrics. + + See Also: + - |flow_from_directory| + + .. |flow_from_directory| replace:: :py:meth:`tf.keras.preprocessing.image\\ + .ImageDataGenerator.flow_from_directory` + """ + distance_metrics_list = distance_metrics_list or [] + color_mode: str = "color" if image_size[2] == 3 else "grayscale" + target_size: Tuple[int, int] = image_size[:2] + adv_data_dir = Path(adv_data_dir) + + attack = _init_fgm( + keras_classifier=keras_classifier, + batch_size=batch_size, + eps=eps, + eps_step=eps_step, + minimal=minimal, + norm=norm, + ) + + data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale) + + data_flow = data_generator.flow_from_directory( + directory=data_dir, + target_size=target_size, + color_mode=color_mode, + class_mode=label_mode, + batch_size=batch_size, + shuffle=False, + ) + num_images = data_flow.n + img_filenames = [Path(x) for x in data_flow.filenames] + + distance_metrics_: Dict[str, List[List[float]]] = {"image": [], "label": []} + for metric_name, _ in distance_metrics_list: + distance_metrics_[metric_name] = [] + + LOGGER.info( + "Generate adversarial images", + attack="fgm", + num_batches=num_images // batch_size, + ) + + for batch_num, (x, y) in enumerate(data_flow): + if batch_num >= num_images // batch_size: + break + + clean_filenames = img_filenames[ + batch_num * batch_size : (batch_num + 1) * batch_size # noqa: E203 + ] + + LOGGER.info( + "Generate adversarial image batch", + attack="fgm", + batch_num=batch_num, + ) + + y_int = np.argmax(y, axis=1) + adv_batch = attack.generate(x=x) + + _save_adv_batch(adv_batch, adv_data_dir, y_int, clean_filenames) + + _evaluate_distance_metrics( + clean_filenames=clean_filenames, + distance_metrics_=distance_metrics_, + clean_batch=x, + adv_batch=adv_batch, + distance_metrics_list=distance_metrics_list, + ) + + LOGGER.info("Adversarial image generation complete", attack="fgm") + _log_distance_metrics(distance_metrics_) + + return pd.DataFrame(distance_metrics_) + + +def _init_fgm( + keras_classifier: TensorFlowV2Classifier, batch_size: int, **kwargs +) -> FastGradientMethod: + """Initializes :py:class:`~art.attacks.evasion.FastGradientMethod`. + + Args: + keras_classifier: A trained :py:class:`~art.estimators.classification\\ + .TensorFlowV2Classifier`. + batch_size: The size of the batch on which adversarial samples are generated. + + Returns: + A :py:class:`~art.attacks.evasion.FastGradientMethod` object. + """ + attack: FastGradientMethod = FastGradientMethod( + estimator=keras_classifier, batch_size=batch_size, **kwargs + ) + return attack + + +def _save_adv_batch(adv_batch, adv_data_dir, y, clean_filenames) -> None: + """Saves a batch of adversarial images to disk. + + Args: + adv_batch: A generated batch of adversarial images. + adv_data_dir: The directory to use when saving the generated adversarial images. + y: An array containing the target labels of the original images. + clean_filenames: A list containing the filenames of the original images. 
+ """ + for batch_image_num, adv_image in enumerate(adv_batch): + adv_image_path = ( + adv_data_dir + / f"{y[batch_image_num]}" + / f"adv_{clean_filenames[batch_image_num].name}" + ) + + if not adv_image_path.parent.exists(): + adv_image_path.parent.mkdir(parents=True) + + save_img(path=str(adv_image_path), x=adv_image) + + +def _evaluate_distance_metrics( + clean_filenames, distance_metrics_, clean_batch, adv_batch, distance_metrics_list +) -> None: + """Calculates distance metrics for a batch of clean/adversarial image pairs. + + Args: + clean_filenames: A list containing the filenames of the original images. + distance_metrics_: A dictionary used to record the values of the distance + metrics computed for the clean/adversarial image pairs. + clean_batch: The clean images used to generate the adversarial images in + `adv_batch`. + adv_batch: A generated batch of adversarial images. + distance_metrics_list: A list of distance metrics to compute after generating an + adversarial image. + """ + LOGGER.debug("evaluate image perturbations using distance metrics") + distance_metrics_["image"].extend([x.name for x in clean_filenames]) + distance_metrics_["label"].extend([x.parent for x in clean_filenames]) + for metric_name, metric in distance_metrics_list: + distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch)) + + +def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> None: + """Logs the distance metrics summary statistics to the MLFlow Tracking service. + + The following summary statistics are calculated and logged to the MLFlow Tracking + service for each of the distributions recorded in the `distance_metrics_` + dictionary: + + - mean + - median + - standard deviation + - interquartile range + - minimum + - maximum + + Args: + distance_metrics_: A dictionary used to record the values of the distance + metrics computed for the clean/adversarial image pairs. + """ + distance_metrics_ = distance_metrics_.copy() + del distance_metrics_["image"] + del distance_metrics_["label"] + for metric_name, metric_values_list in distance_metrics_.items(): + metric_values = np.array(metric_values_list) + mlflow.log_metric(key=f"{metric_name}_mean", value=metric_values.mean()) + mlflow.log_metric(key=f"{metric_name}_median", value=np.median(metric_values)) + mlflow.log_metric(key=f"{metric_name}_stdev", value=metric_values.std()) + mlflow.log_metric( + key=f"{metric_name}_iqr", value=scipy.stats.iqr(metric_values) + ) + mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) + mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) + LOGGER.info("logged distance-based metric", metric_name=metric_name) diff --git a/examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py b/examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py new file mode 100644 index 000000000..10ca767e5 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py @@ -0,0 +1,52 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. 
Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for initializing and configuring Tensorflow.""" + +from __future__ import annotations + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +try: + import tensorflow as tf + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def init_tensorflow(seed: int) -> None: + """Initializes Tensorflow to ensure reproducibility. + + This task plugin **must** be run before any other features from Tensorflow are used + to ensure reproducibility. + + Args: + seed: The seed to use for Tensorflow's random number generator. + """ + tf.random.set_seed(seed) diff --git a/examples/task-plugins/dioptra_custom/vc/builtin.py b/examples/task-plugins/dioptra_custom/vc/builtin.py new file mode 100644 index 000000000..f477ecc58 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/builtin.py @@ -0,0 +1,208 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
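Seeding has to happen before any other TensorFlow operation touches the global generator; a short sketch of the intended call order, with an illustrative seed:

    import tensorflow as tf

    tf.random.set_seed(20240918)     # equivalent to init_tensorflow(seed=20240918)
    sample = tf.random.uniform([1])  # now reproducible across runs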
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +from pathlib import Path +from typing import Callable, Dict, List, Optional, Tuple, Union + +import mlflow +import numpy as np +import pandas as pd +import scipy.stats +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from art.attacks.evasion import FastGradientMethod + from art.estimators.classification import TensorFlowV2Classifier + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="art", + ) + + +try: + from tensorflow.keras.preprocessing.image import ImageDataGenerator, save_img + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("art", exc_type=ARTDependencyError) +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def create_adversarial_fgm_dataset( + data_dir: str, + adv_data_dir: Union[str, Path], + keras_classifier: TensorFlowV2Classifier, + image_size: Tuple[int, int, int], + distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, + rescale: float = 1.0 / 255, + batch_size: int = 32, + label_mode: str = "categorical", + eps: float = 0.3, + eps_step: float = 0.1, + minimal: float = 0, + norm: float = np.inf, + target_index: int = -1, + targeted: bool = False, +) -> pd.DataFrame: + distance_metrics_list = distance_metrics_list or [] + color_mode: str = "rgb" if image_size[2] == 3 else "grayscale" + target_size: Tuple[int, int] = image_size[:2] + adv_data_dir = Path(adv_data_dir) + + attack = _init_fgm( + keras_classifier=keras_classifier, + batch_size=batch_size, + eps=eps, + eps_step=eps_step, + minimal=minimal, + norm=norm, + targeted=targeted, + ) + + data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale) + + data_flow = data_generator.flow_from_directory( + directory=data_dir, + target_size=target_size, + color_mode=color_mode, + class_mode=label_mode, + batch_size=batch_size, + shuffle=False, + ) + n_classes = len(data_flow.class_indices) + num_images = data_flow.n + img_filenames = [Path(x) for x in data_flow.filenames] + class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get) + + distance_metrics_: Dict[str, List[List[float]]] = {"image": [], "label": []} + for metric_name, _ in distance_metrics_list: + distance_metrics_[metric_name] = [] + + LOGGER.info( + "Generate adversarial images", + attack="fgm", + num_batches=num_images // batch_size, + ) + + for batch_num, (x, y) in enumerate(data_flow): + if batch_num >= num_images // batch_size: + break + + clean_filenames = img_filenames[ + batch_num * batch_size : (batch_num + 1) * batch_size # noqa: E203 + ] + + LOGGER.info( + "Generate adversarial image batch", + attack="fgm", + batch_num=batch_num, + ) + + y_int = np.argmax(y, axis=1) + if target_index >= 0: + y_one_hot = np.zeros(n_classes) + y_one_hot[target_index] = 1.0 + y_target = np.tile(y_one_hot, (x.shape[0], 1)) + + adv_batch = attack.generate(x=x, y=y_target) + else: + adv_batch = attack.generate(x=x) + + _save_adv_batch( + adv_batch, 
adv_data_dir, y_int, clean_filenames, class_names_list + ) + + _evaluate_distance_metrics( + clean_filenames=clean_filenames, + distance_metrics_=distance_metrics_, + clean_batch=x, + adv_batch=adv_batch, + distance_metrics_list=distance_metrics_list, + ) + + LOGGER.info("Adversarial image generation complete", attack="fgm") + _log_distance_metrics(distance_metrics_) + + return pd.DataFrame(distance_metrics_) + + +def _init_fgm( + keras_classifier: TensorFlowV2Classifier, batch_size: int, **kwargs +) -> FastGradientMethod: + attack: FastGradientMethod = FastGradientMethod( + estimator=keras_classifier, batch_size=batch_size, **kwargs + ) + return attack + + +def _save_adv_batch( + adv_batch, adv_data_dir, y, clean_filenames, class_names_list +) -> None: + for batch_image_num, adv_image in enumerate(adv_batch): + out_label = class_names_list[y[batch_image_num]] + adv_image_path = ( + adv_data_dir + / f"{out_label}" + / f"adv_{clean_filenames[batch_image_num].name}" + ) + + if not adv_image_path.parent.exists(): + adv_image_path.parent.mkdir(parents=True) + + save_img(path=str(adv_image_path), x=adv_image) + + +def _evaluate_distance_metrics( + clean_filenames, distance_metrics_, clean_batch, adv_batch, distance_metrics_list +) -> None: + LOGGER.debug("evaluate image perturbations using distance metrics") + distance_metrics_["image"].extend([x.name for x in clean_filenames]) + distance_metrics_["label"].extend([x.parent for x in clean_filenames]) + for metric_name, metric in distance_metrics_list: + distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch)) + + +def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> None: + distance_metrics_ = distance_metrics_.copy() + del distance_metrics_["image"] + del distance_metrics_["label"] + for metric_name, metric_values_list in distance_metrics_.items(): + metric_values = np.array(metric_values_list) + mlflow.log_metric(key=f"{metric_name}_mean", value=metric_values.mean()) + mlflow.log_metric(key=f"{metric_name}_median", value=np.median(metric_values)) + mlflow.log_metric(key=f"{metric_name}_stdev", value=metric_values.std()) + mlflow.log_metric( + key=f"{metric_name}_iqr", value=scipy.stats.iqr(metric_values) + ) + mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) + mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) + LOGGER.info("logged distance-based metric", metric_name=metric_name) + diff --git a/examples/task-plugins/dioptra_custom/vc/data_tensorflow.py b/examples/task-plugins/dioptra_custom/vc/data_tensorflow.py new file mode 100644 index 000000000..facabcaa0 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/data_tensorflow.py @@ -0,0 +1,128 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
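The vc variant above adds targeting: when `target_index >= 0`, every image in a batch is pushed toward that single class through a tiled one-hot label array. A sketch of the label construction it performs, with illustrative shapes:

    import numpy as np

    n_classes, batch = 10, 4
    y_one_hot = np.zeros(n_classes)
    y_one_hot[3] = 1.0                          # target_index = 3
    y_target = np.tile(y_one_hot, (batch, 1))   # shape (4, 10), one row per image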
To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for preparing Tensorflow-specific dataset iterators. + +.. |flow_from_directory| replace:: :py:meth:`tensorflow.keras.preprocessing.image\\ + .ImageDataGenerator.flow_from_directory` +.. |directory_iterator| replace:: :py:class:`~tensorflow.keras.preprocessing.image\\ + .DirectoryIterator` +""" + +from __future__ import annotations + +from typing import Optional, Tuple + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.preprocessing.image import ( + DirectoryIterator, + ImageDataGenerator, + ) + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def create_image_dataset( + data_dir: str, + subset: Optional[str], + image_size: Tuple[int, int, int], + seed: int, + rescale: float = 1.0 / 255, + validation_split: Optional[float] = 0.2, + batch_size: int = 32, + label_mode: str = "categorical", +) -> DirectoryIterator: + """Yields an iterator for generating batches of real-time augmented image data. + + Args: + data_dir: The directory containing the image dataset. + subset: The subset of data (`"training"` or `"validation"`) to use if + `validation_split` is not `None`. If `None`, then `validation_split` must + also be `None`. + image_size: A tuple of integers `(height, width, channels)` used to preprocess + the images so that they all have the same dimensions and number of color + channels. `channels=3` means RGB color images and `channels=1` means + grayscale images. Images with different dimensions will be resized. If + `channels=1`, color images will be converted into grayscale. + seed: Sets the random seed used for shuffling and transformations. + rescale: The rescaling factor for the pixel vectors. If `None` or `0`, no + rescaling is applied, otherwise multiply the data by the value provided + (after applying all other transformations). The default is `1.0 / 255`. + validation_split: The fraction of the data to set aside for validation. If not + `None`, the value given here must be between `0` and `1`. If `None`, then + there is no validation set. The default is `0.2`. + batch_size: The size of the batch on which adversarial samples are generated. + The default is `32`. + label_mode: Determines how the label arrays for the dataset will be returned. + The available choices are: `"categorical"`, `"binary"`, `"sparse"`, + `"input"`, `None`. For information on the meaning of each choice, see + the documentation for |flow_from_directory|. The default is `"categorical"`. + + Returns: + A :py:class:`~tensorflow.keras.preprocessing.image.DirectoryIterator` object. 
+ + See Also: + - |flow_from_directory| + - :py:class:`~tensorflow.keras.preprocessing.image.DirectoryIterator` + """ + color_mode: str = ( + "rgb" if image_size[2] == 3 else "rgba" if image_size[2] == 4 else "grayscale" + ) + target_size: Tuple[int, int] = image_size[:2] + + data_generator: ImageDataGenerator = ImageDataGenerator( + rescale=rescale, + validation_split=validation_split, + ) + + return data_generator.flow_from_directory( + directory=data_dir, + target_size=target_size, + color_mode=color_mode, + class_mode=label_mode, + batch_size=batch_size, + seed=seed, + subset=subset, + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_n_classes_from_directory_iterator(ds: DirectoryIterator) -> int: + """Returns the number of unique labels found by the |directory_iterator|. + + Args: + ds: A |directory_iterator| object. + + Returns: + The number of unique labels in the dataset. + """ + return len(ds.class_indices) diff --git a/examples/task-plugins/dioptra_custom/vc/defenses_image_preprocessing.py b/examples/task-plugins/dioptra_custom/vc/defenses_image_preprocessing.py new file mode 100644 index 000000000..b696dad73 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/defenses_image_preprocessing.py @@ -0,0 +1,201 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +from pathlib import Path +from typing import Callable, Dict, List, Optional, Tuple, Union + +import mlflow +import numpy as np +import pandas as pd +import scipy.stats +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from art.defences.preprocessor import ( + GaussianAugmentation, + JpegCompression, + SpatialSmoothing, + ) + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="art", + ) + + +try: + from tensorflow.keras.preprocessing.image import ImageDataGenerator, save_img + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + +DEFENSE_LIST = { + "spatial_smoothing": SpatialSmoothing, + "jpeg_compression": JpegCompression, + "gaussian_augmentation": GaussianAugmentation, +} + + +@pyplugs.register +@require_package("art", exc_type=ARTDependencyError) +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def create_defended_dataset( + data_dir: str, + dataset_name: str, + def_data_dir: Union[str, Path], + image_size: Tuple[int, int, int], + distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, + batch_size: int = 32, + label_mode: str = "categorical", + def_type: str = "spatial_smoothing", + **kwargs, +) -> pd.DataFrame: + distance_metrics_list = distance_metrics_list or [] + color_mode: str = "rgb" if image_size[2] == 3 else "grayscale" + rescale: float = 1.0 if image_size[2] == 3 else 1.0 / 255 + clip_values: Tuple[float, float] = (0, 255) if image_size[2] == 3 else (0, 1.0) + target_size: Tuple[int, int] = image_size[:2] + def_data_dir = Path(def_data_dir) + data_dir = Path(data_dir) / dataset_name + + defense = init_defense( + clip_values=clip_values, + def_type=def_type, + **kwargs, + ) + + data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale) + + data_flow = data_generator.flow_from_directory( + directory=data_dir, + target_size=target_size, + color_mode=color_mode, + class_mode=label_mode, + batch_size=batch_size, + shuffle=False, + ) + num_images = data_flow.n + img_filenames = [Path(x) for x in data_flow.filenames] + class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get) + + distance_metrics_: Dict[str, List[List[float]]] = {"image": [], "label": []} + for metric_name, _ in distance_metrics_list: + distance_metrics_[metric_name] = [] + + LOGGER.info( + "Generate defended images", + defense=def_type, + num_batches=num_images // batch_size, + ) + + for batch_num, (x, y) in enumerate(data_flow): + if batch_num >= num_images // batch_size: + break + + clean_filenames = img_filenames[ + batch_num * batch_size : (batch_num + 1) * batch_size # noqa: E203 + ] + + LOGGER.info( + "Generate defended image batch", + defense=def_type, + batch_num=batch_num, + ) + + y_int = np.argmax(y, axis=1) + adv_batch_defend, _ = defense(x) + + _save_def_batch( + adv_batch_defend, def_data_dir, y_int, clean_filenames, class_names_list + ) + + _evaluate_distance_metrics( + clean_filenames=clean_filenames, + 
distance_metrics_=distance_metrics_, + clean_batch=x, + adv_batch=adv_batch_defend, + distance_metrics_list=distance_metrics_list, + ) + + LOGGER.info("Defended image generation complete", defense=def_type) + _log_distance_metrics(distance_metrics_) + + return pd.DataFrame(distance_metrics_) + + +def init_defense(clip_values, def_type, **kwargs): + defense = DEFENSE_LIST[def_type]( + clip_values=clip_values, + **kwargs, + ) + return defense + + +def _save_def_batch( + adv_batch, def_data_dir, y, clean_filenames, class_names_list +) -> None: + for batch_image_num, adv_image in enumerate(adv_batch): + out_label = class_names_list[y[batch_image_num]] + adv_image_path = ( + def_data_dir + / f"{out_label}" + / f"def_{clean_filenames[batch_image_num].name}" + ) + + if not adv_image_path.parent.exists(): + adv_image_path.parent.mkdir(parents=True) + + save_img(path=str(adv_image_path), x=adv_image) + + +def _evaluate_distance_metrics( + clean_filenames, distance_metrics_, clean_batch, adv_batch, distance_metrics_list +) -> None: + LOGGER.debug("evaluate image perturbations using distance metrics") + distance_metrics_["image"].extend([x.name for x in clean_filenames]) + distance_metrics_["label"].extend([x.parent for x in clean_filenames]) + for metric_name, metric in distance_metrics_list: + distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch)) + + +def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> None: + distance_metrics_ = distance_metrics_.copy() + del distance_metrics_["image"] + del distance_metrics_["label"] + for metric_name, metric_values_list in distance_metrics_.items(): + metric_values = np.array(metric_values_list) + mlflow.log_metric(key=f"{metric_name}_mean", value=metric_values.mean()) + mlflow.log_metric(key=f"{metric_name}_median", value=np.median(metric_values)) + mlflow.log_metric(key=f"{metric_name}_stdev", value=metric_values.std()) + mlflow.log_metric( + key=f"{metric_name}_iqr", value=scipy.stats.iqr(metric_values) + ) + mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) + mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) + LOGGER.info("logged distance-based metric", metric_name=metric_name) diff --git a/examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py b/examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py new file mode 100644 index 000000000..f5ef72548 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py @@ -0,0 +1,231 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). 
The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""Neural network image classifiers implemented in Tensorflow/Keras.""" + +from __future__ import annotations + +from types import FunctionType +from typing import Callable, Dict, List, Tuple, Union + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.layers import ( + BatchNormalization, + Conv2D, + Dense, + Dropout, + Flatten, + MaxPooling2D, + ) + from tensorflow.keras.metrics import Metric + from tensorflow.keras.models import Sequential + from tensorflow.keras.optimizers import Optimizer + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def init_classifier( + model_architecture: str, + optimizer: Optimizer, + metrics: List[Union[Metric, FunctionType]], + input_shape: Tuple[int, int, int], + n_classes: int, + loss: str = "categorical_crossentropy", +) -> Sequential: + """Initializes an untrained neural network image classifier for Tensorflow/Keras. + + The `model_architecture` argument is used to select a neural network architecture + from the architecture registry. The string passed to `model_architecture` must match + one of the following, + + - `"shallow_net"` - A shallow neural network architecture. + - `"le_net"` - The LeNet-5 convolutional neural network architecture. + - `"alex_net"` - The AlexNet convolutional neural network architecture. + + Args: + model_architecture: The neural network architecture to use. + optimizer: A Keras :py:class:`~tf.keras.optimizers.Optimizer` providing an + algorithm to use to train the estimator, such as + :py:class:`~tf.keras.optimizers.SGD` and + :py:class:`~tf.keras.optimizers.Adam`. + metrics: A list of metrics to be evaluated by the model during training and + testing. + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. + n_classes: The number of target labels in the dataset. + loss: A string specifying the loss function to be minimized during training. The + string must match the name of one of the loss functions in the + :py:mod:`tf.keras.losses` module. The default is + `"categorical_crossentropy"`. + + Returns: + A compiled :py:class:`~tf.keras.Sequential` object. + + See Also: + - :py:mod:`tf.keras.losses` + - :py:mod:`tf.keras.optimizers` + - :py:class:`tf.keras.Sequential` + """ + classifier: Sequential = KERAS_CLASSIFIERS_REGISTRY[model_architecture]( + input_shape, + n_classes, + ) + classifier.compile(loss=loss, optimizer=optimizer, metrics=metrics) + + return classifier + + +def shallow_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: + """Builds an untrained shallow neural network architecture for Tensorflow/Keras. + + Args: + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. 
The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. + n_classes: The number of target labels in the dataset. + + Returns: + A :py:class:`~tf.keras.Sequential` object. + + See Also: + - :py:class:`tf.keras.Sequential` + """ + model = Sequential() + + # Flatten inputs + model.add(Flatten(input_shape=input_shape)) + + # single hidden layer: + model.add(Dense(32, activation="sigmoid")) + + # output layer: + model.add(Dense(n_classes, activation="softmax")) + + return model + + +def le_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: + """Builds an untrained LeNet-5 neural network architecture for Tensorflow/Keras. + + Args: + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. + n_classes: The number of target labels in the dataset. + + Returns: + A :py:class:`~tf.keras.Sequential` object. + + See Also: + - :py:class:`tf.keras.Sequential` + """ + model = Sequential() + + # first convolutional layer: + model.add( + Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape) + ) + + # second conv layer, with pooling and dropout: + model.add(Conv2D(64, kernel_size=(3, 3), activation="relu")) + model.add(MaxPooling2D(pool_size=(2, 2))) + model.add(Dropout(0.25)) + model.add(Flatten()) + + # dense hidden layer, with dropout: + model.add(Dense(128, activation="relu")) + model.add(Dropout(0.5)) + + # output layer: + model.add(Dense(n_classes, activation="softmax")) + + return model + + +def alex_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: + """Builds an untrained AlexNet neural network architecture for Tensorflow/Keras. + + Args: + input_shape: A shape tuple of integers, not including the batch size, specifying + the dimensions of the image data. The shape tuple for all classifiers in the + architecture registry follows the convention `(height, width, channels)`. + n_classes: The number of target labels in the dataset. + + Returns: + A :py:class:`~tf.keras.Sequential` object. 
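+
+    Examples:
+        An illustrative sketch; AlexNet's strided convolutions and pooling require
+        inputs far larger than MNIST's 28 x 28 images, so the classic 224 x 224
+        RGB input shape is assumed here::
+
+            model = alex_net(input_shape=(224, 224, 3), n_classes=10)
+            model.summary()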
+ + See Also: + - :py:class:`tf.keras.Sequential` + """ + model = Sequential() + + # first conv-pool block: + model.add( + Conv2D( + 96, + kernel_size=(11, 11), + strides=(4, 4), + activation="relu", + input_shape=input_shape, + ) + ) + model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) + model.add(BatchNormalization()) + + # second conv-pool block: + model.add(Conv2D(256, kernel_size=(5, 5), activation="relu")) + model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) + model.add(BatchNormalization()) + + # third conv-pool block: + model.add(Conv2D(256, kernel_size=(3, 3), activation="relu")) + model.add(Conv2D(384, kernel_size=(3, 3), activation="relu")) + model.add(Conv2D(384, kernel_size=(3, 3), activation="relu")) + model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) + model.add(BatchNormalization()) + + # dense layers: + model.add(Flatten()) + model.add(Dense(4096, activation="tanh")) + model.add(Dropout(0.5)) + model.add(Dense(4096, activation="tanh")) + model.add(Dropout(0.5)) + + # output layer: + model.add(Dense(n_classes, activation="softmax")) + + return model + + +KERAS_CLASSIFIERS_REGISTRY: Dict[ + str, Callable[[Tuple[int, int, int], int], Sequential] +] = dict(shallow_net=shallow_net, le_net=le_net, alex_net=alex_net) diff --git a/examples/task-plugins/dioptra_custom/vc/estimators_methods.py b/examples/task-plugins/dioptra_custom/vc/estimators_methods.py new file mode 100644 index 000000000..28396c530 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/estimators_methods.py @@ -0,0 +1,122 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +import datetime +from typing import Any, Dict, Optional + +import mlflow +import structlog +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.generics import estimator_predict, fit_estimator + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +def fit( + estimator: Any, + x: Any = None, + y: Any = None, + fit_kwargs: Optional[Dict[str, Any]] = None, +) -> Any: + """Fits the estimator to the given data. + + This task plugin wraps :py:func:`~dioptra.sdk.generics.fit_estimator`, which is a + generic function that uses multiple argument dispatch to handle the estimator + fitting method for different machine learning libraries. 
The modules attached to the + advertised plugin entry point `dioptra.generics.fit_estimator` are used to build the + function dispatch registry at runtime. For more information on the supported fitting + methods and `fit_kwargs` arguments, please refer to the documentation of the + registered dispatch functions. + + Args: + estimator: The model to be trained. + x: The input data to be used for training. + y: The target data to be used for training. + fit_kwargs: An optional dictionary of keyword arguments to pass to the + dispatched function. + + Returns: + The object returned by the estimator's fitting function. For further details on + the type of object this method can return, see the documentation for the + registered dispatch functions. + + See Also: + - :py:func:`dioptra.sdk.generics.fit_estimator` + """ + fit_kwargs = fit_kwargs or {} + time_start: datetime.datetime = datetime.datetime.now() + + LOGGER.info( + "Begin estimator fit", + timestamp=time_start.isoformat(), + ) + + estimator_fit_result: Any = fit_estimator(estimator, x, y, **fit_kwargs) + + time_end: datetime.datetime = datetime.datetime.now() + + total_seconds: float = (time_end - time_start).total_seconds() + total_minutes: float = total_seconds / 60 + + mlflow.log_metric("training_time_in_minutes", total_minutes) + LOGGER.info( + "Estimator fit complete", + timestamp=time_end.isoformat(), + total_minutes=total_minutes, + ) + + return estimator_fit_result + + +@pyplugs.register +def predict( + estimator: Any, + x: Any = None, + predict_kwargs: Optional[Dict[str, Any]] = None, +) -> Any: + """Uses the estimator to make predictions on the given input data. + + This task plugin wraps :py:func:`~dioptra.sdk.generics.estimator_predict`, which is + a generic function that uses multiple argument dispatch to handle estimator + prediction methods for different machine learning libraries. The modules attached to + the advertised plugin entry point `dioptra.generics.estimator_predict` are used to + build the function dispatch registry at runtime. For more information on the + supported prediction methods and `predict_kwargs` arguments, refer to the + documentation of the registered dispatch functions. + + Args: + estimator: A trained model to be used to generate predictions. + x: The input data for which to generate predictions. + predict_kwargs: An optional dictionary of keyword arguments to pass to the + dispatched function. + + Returns: + The object returned by the estimator's predict function. For further details on + the type of object this method can return, see the documentation for the + registered dispatch functions. + + See Also: + - :py:func:`dioptra.sdk.generics.estimator_predict` + """ + predict_kwargs = predict_kwargs or {} + prediction: Any = estimator_predict(estimator, x, **predict_kwargs) + + return prediction diff --git a/examples/task-plugins/dioptra_custom/vc/import_keras.py b/examples/task-plugins/dioptra_custom/vc/import_keras.py new file mode 100644 index 000000000..b5d03b51c --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/import_keras.py @@ -0,0 +1,65 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. 
Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +import importlib +from types import FunctionType, ModuleType +from typing import Union + +import structlog +from structlog.stdlib import BoundLogger + +from dioptra.sdk.exceptions import TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.callbacks import Callback + from tensorflow.keras.metrics import Metric + from tensorflow.keras.optimizers import Optimizer + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + +KERAS_CALLBACKS: str = "tensorflow.keras.callbacks" +KERAS_METRICS: str = "tensorflow.keras.metrics" +KERAS_OPTIMIZERS: str = "tensorflow.keras.optimizers" + + +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_callback(callback_name: str) -> Callback: + keras_callbacks: ModuleType = importlib.import_module(KERAS_CALLBACKS) + callback: Callback = getattr(keras_callbacks, callback_name) + return callback + + +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_metric(metric_name: str) -> Union[Metric, FunctionType]: + keras_metrics: ModuleType = importlib.import_module(KERAS_METRICS) + metric: Metric = getattr(keras_metrics, metric_name) + return metric + + +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_optimizer(optimizer_name: str) -> Optimizer: + keras_optimizers: ModuleType = importlib.import_module(KERAS_OPTIMIZERS) + optimizer: Optimizer = getattr(keras_optimizers, optimizer_name) + return optimizer diff --git a/examples/task-plugins/dioptra_custom/vc/metrics_distance.py b/examples/task-plugins/dioptra_custom/vc/metrics_distance.py new file mode 100644 index 000000000..034f5a02c --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/metrics_distance.py @@ -0,0 +1,307 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for getting functions from a distance metric registry. + +.. |Linf| replace:: L\\ :sub:`∞` +.. |L1| replace:: L\\ :sub:`1` +.. |L2| replace:: L\\ :sub:`2` +""" + +from __future__ import annotations + +from typing import Any, Callable, Dict, List, Optional, Tuple + +import numpy as np +import structlog +from scipy.stats import wasserstein_distance +from sklearn.metrics.pairwise import paired_distances +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs + +from .metrics_exceptions import UnknownDistanceMetricError + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +def get_distance_metric_list( + request: List[Dict[str, str]] +) -> List[Tuple[str, Callable[..., np.ndarray]]]: + """Gets multiple distance metric functions from the registry. + + The following metrics are available in the registry, + + - `l_inf_norm` + - `l_1_norm` + - `l_2_norm` + - `paired_cosine_similarities` + - `paired_euclidean_distances` + - `paired_manhattan_distances` + - `paired_wasserstein_distances` + + Args: + request: A list of dictionaries with the keys `name` and `func`. The `func` key + is used to lookup the metric function in the registry and must match one of + the metric names listed above. The `name` key is human-readable label for + the metric function. + + Returns: + A list of tuples with two elements. The first element of each tuple is the label + from the `name` key of `request`, and the second element is the callable metric + function. + """ + distance_metrics_list: List[Tuple[str, Callable[..., np.ndarray]]] = [] + + for metric in request: + metric_callable: Optional[Callable[..., np.ndarray]] = ( + DISTANCE_METRICS_REGISTRY.get(metric["func"]) + ) + + if metric_callable is not None: + distance_metrics_list.append((metric["name"], metric_callable)) + + else: + LOGGER.warn( + "Distance metric not in registry, skipping...", + name=metric["name"], + func=metric["func"], + ) + + return distance_metrics_list + + +@pyplugs.register +def get_distance_metric(func: str) -> Callable[..., np.ndarray]: + """Gets a distance metric function from the registry. + + The following metrics are available in the registry, + + - `l_inf_norm` + - `l_1_norm` + - `l_2_norm` + - `paired_cosine_similarities` + - `paired_euclidean_distances` + - `paired_manhattan_distances` + - `paired_wasserstein_distances` + + Args: + func: A string that identifies the distance metric to return from the registry. + The string must match one of the names of the metrics in the registry. + + Returns: + A callable distance metric function. + """ + metric_callable: Optional[Callable[..., np.ndarray]] = ( + DISTANCE_METRICS_REGISTRY.get(func) + ) + + if metric_callable is None: + LOGGER.error( + "Distance metric not in registry", + func=func, + ) + raise UnknownDistanceMetricError( + f"Could not find any distance metric named {func!r} in the metrics " + "plugin collection. Check spelling and try again." + ) + + return metric_callable + + +def l_inf_norm(y_true, y_pred) -> np.ndarray: + """Calculates the |Linf| norm between a batch of two matrices. 
+ + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of |Linf| norms. + """ + metric: np.ndarray = _matrix_difference_l_norm( + y_true=y_true, y_pred=y_pred, order=np.inf + ) + return metric + + +def l_1_norm(y_true, y_pred) -> np.ndarray: + """Calculates the |L1| norm between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of |L1| norms. + """ + metric: np.ndarray = _matrix_difference_l_norm( + y_true=y_true, y_pred=y_pred, order=1 + ) + return metric + + +def l_2_norm(y_true, y_pred) -> np.ndarray: + """Calculates the |L2| norm between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of |L2| norms. + """ + metric: np.ndarray = _matrix_difference_l_norm( + y_true=y_true, y_pred=y_pred, order=2 + ) + return metric + + +def paired_cosine_similarities(y_true, y_pred) -> np.ndarray: + """Calculates the cosine similarity between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of cosine similarities. + """ + y_true_normalized: np.ndarray = _normalize_batch(_flatten_batch(y_true), order=2) + y_pred_normalized: np.ndarray = _normalize_batch(_flatten_batch(y_pred), order=2) + metric: np.ndarray = np.sum(y_true_normalized * y_pred_normalized, axis=1) + return metric + + +def paired_euclidean_distances(y_true, y_pred) -> np.ndarray: + """Calculates the Euclidean distance between a batch of two matrices. + + The Euclidean distance is equivalent to the |L2| norm. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of euclidean distances. + """ + metric: np.ndarray = l_2_norm(y_true=y_true, y_pred=y_pred) + return metric + + +def paired_manhattan_distances(y_true, y_pred) -> np.ndarray: + """Calculates the Manhattan distance between a batch of two matrices. + + The Manhattan distance is equivalent to the |L1| norm. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of Manhattan distances. + """ + metric: np.ndarray = l_1_norm(y_true=y_true, y_pred=y_pred) + return metric + + +def paired_wasserstein_distances(y_true, y_pred, **kwargs) -> np.ndarray: + """Calculates the Wasserstein distance between a batch of two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of Wasserstein distances. 
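+
+    Examples:
+        An illustrative sketch on a synthetic batch (the shapes and perturbation
+        size are arbitrary)::
+
+            import numpy as np
+
+            rng = np.random.default_rng(seed=0)
+            y_true = rng.random((4, 28, 28, 1))
+            y_pred = np.clip(y_true + 0.05, 0.0, 1.0)
+            distances = paired_wasserstein_distances(y_true, y_pred)
+            # distances has shape (4,), one value per image pair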
+ + See Also: + - :py:func:`scipy.stats.wasserstein_distance` + """ + + def wrapped_metric(X, Y): + return wasserstein_distance(u_values=X, v_values=Y, **kwargs) + + metric: np.ndarray = paired_distances( + X=_flatten_batch(y_true), Y=_flatten_batch(y_pred), metric=wrapped_metric + ) + return metric + + +def _flatten_batch(X: np.ndarray) -> np.ndarray: + """Flattens each of the matrices in a batch into a one-dimensional array. + + Args: + X: A batch of matrices. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of one-dimensional arrays. + """ + num_samples: int = X.shape[0] + num_matrix_elements: int = int(np.prod(X.shape[1:])) + return X.reshape((num_samples, num_matrix_elements)) + + +def _matrix_difference_l_norm(y_true, y_pred, order) -> np.ndarray: + """Calculates a batch of norms of the difference between two matrices. + + Args: + y_true: A batch of matrices containing the original or target values. + y_pred: A batch of matrices containing the perturbed or predicted values. + order: The order of the norm, see :py:func:`numpy.linalg.norm` for the full list + of norms that can be calculated. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of norms. + + See Also: + - :py:func:`numpy.linalg.norm` + """ + y_diff: np.ndarray = _flatten_batch(y_true - y_pred) + y_diff_l_norm: np.ndarray = np.linalg.norm(y_diff, axis=1, ord=order) + return y_diff_l_norm + + +def _normalize_batch(X: np.ndarray, order: int) -> np.ndarray: + """Normalizes a batch of matrices by their norms. + + Args: + X: A batch of matrices to be normalized. + order: The order of the norm used for normalization, see + :py:func:`numpy.linalg.norm` for the full list of available norms. + + Returns: + A :py:class:`numpy.ndarray` containing a batch of normalized matrices. + + See Also: + - :py:func:`numpy.linalg.norm` + """ + X_l_norm: np.ndarray = np.linalg.norm(X, axis=1, ord=order) + num_samples: int = X_l_norm.shape[0] + normalized_batch: np.ndarray = X / X_l_norm.reshape((num_samples, 1)) + return normalized_batch + + +DISTANCE_METRICS_REGISTRY: Dict[str, Callable[..., Any]] = dict( + l_inf_norm=l_inf_norm, + l_1_norm=l_1_norm, + l_2_norm=l_2_norm, + paired_cosine_similarities=paired_cosine_similarities, + paired_euclidean_distances=paired_euclidean_distances, + paired_manhattan_distances=paired_manhattan_distances, + paired_wasserstein_distances=paired_wasserstein_distances, +) diff --git a/examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py b/examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py new file mode 100644 index 000000000..fc88cc25c --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py @@ -0,0 +1,27 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module of exceptions for the metrics plugins collection."""
+
+from dioptra.sdk.exceptions.base import BaseTaskPluginError
+
+
+class UnknownDistanceMetricError(BaseTaskPluginError):
+    """The requested distance metric could not be located."""
+
+
+class UnknownPerformanceMetricError(BaseTaskPluginError):
+    """The requested performance metric could not be located."""
diff --git a/examples/task-plugins/dioptra_custom/vc/mlflow.py b/examples/task-plugins/dioptra_custom/vc/mlflow.py
new file mode 100644
index 000000000..8546dff8c
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/vc/mlflow.py
@@ -0,0 +1,103 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for using the MLFlow model registry."""
+
+from __future__ import annotations
+
+import os
+from pathlib import Path
+from typing import Optional
+
+import mlflow
+import structlog
+from mlflow.entities.model_registry import ModelVersion
+from mlflow.tracking import MlflowClient
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+
+from .artifacts_restapi import upload_model_to_restapi
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+@pyplugs.register
+def add_model_to_registry(name: str, model_dir: str) -> Optional[ModelVersion]:
+    """Registers a trained model logged during the current run to the MLFlow registry.
+
+    The active MLFlow run is looked up via :py:func:`mlflow.active_run`, and the
+    Dioptra job ID is read from the ``__JOB_ID`` environment variable set by the
+    worker. The registered model is also uploaded to the Dioptra REST API.
+
+    Args:
+        name: The registration name to use for the model.
+        model_dir: The relative artifact directory where MLFlow logged the model
+            trained during the current run.
+
+    Returns:
+        A :py:class:`~mlflow.entities.model_registry.ModelVersion` object created by
+        the backend.
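+
+    Examples:
+        A hedged sketch of a direct call; in practice this plugin runs inside a
+        Dioptra job, which provides the active MLFlow run and sets the
+        ``__JOB_ID`` environment variable (the model name and directory below
+        are placeholders)::
+
+            model_version = add_model_to_registry(
+                name="mnist_classifier",
+                model_dir="model",
+            )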
+ """ + job_id = os.environ['__JOB_ID'] + if not name.strip(): + return None + + active_run = mlflow.active_run() + + run_id: str = active_run.info.run_id + artifact_uri: str = active_run.info.artifact_uri + source: str = f"{artifact_uri}/{model_dir}" + + registered_models = [x.name for x in MlflowClient().search_registered_models()] + + if name not in registered_models: + LOGGER.info("create registered model", name=name) + MlflowClient().create_registered_model(name=name) + + LOGGER.info("create model version", name=name, source=source, run_id=run_id) + model_version: ModelVersion = MlflowClient().create_model_version( + name=name, source=source, run_id=run_id + ) + upload_model_to_restapi(name, source, job_id) + + return model_version + + +@pyplugs.register +def get_experiment_name() -> str: + """Gets the name of the experiment for the current run. + + Args: + active_run: The :py:class:`mlflow.ActiveRun` object managing the current run's + state. + + Returns: + The name of the experiment. + """ + active_run = mlflow.active_run() + + experiment_name: str = ( + MlflowClient().get_experiment(active_run.info.experiment_id).name + ) + LOGGER.info( + "Obtained experiment name of active run", experiment_name=experiment_name + ) + + return experiment_name + + +@pyplugs.register +def prepend_cwd(path: str) -> Path: + ret = Path.cwd() / path + return ret + diff --git a/examples/task-plugins/dioptra_custom/vc/random_rng.py b/examples/task-plugins/dioptra_custom/vc/random_rng.py new file mode 100644 index 000000000..d10b2bd60 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/random_rng.py @@ -0,0 +1,56 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for managing random number generators.""" + +from __future__ import annotations + +from typing import Tuple + +import numpy as np +import structlog +from numpy.random._generator import Generator as RNGenerator +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +@pyplugs.task_nout(2) +def init_rng(seed: int = -1) -> Tuple[int, RNGenerator]: + """Constructs a new random number generator. + + Args: + seed: A seed to initialize the random number generator. If the value is less + than zero, then the seed is generated by pulling fresh, unpredictable + entropy from the OS. The default is `-1`. 
+ + Returns: + A tuple containing the seed and the initialized random number generator. If a + `seed < 0` was passed as an argument, then the seed generated by the OS will be + returned. + + See Also: + - :py:func:`numpy.random.default_rng` + """ + rng = np.random.default_rng(seed if seed >= 0 else None) + + if seed < 0: + seed = rng.bit_generator._seed_seq.entropy # type: ignore[attr-defined] + + return int(seed), rng diff --git a/examples/task-plugins/dioptra_custom/vc/random_sample.py b/examples/task-plugins/dioptra_custom/vc/random_sample.py new file mode 100644 index 000000000..33c13d5d5 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/random_sample.py @@ -0,0 +1,89 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for drawing random samples.""" + +from __future__ import annotations + +from typing import Optional, Tuple, Union + +import numpy as np +import structlog +from numpy.random._generator import Generator as RNGenerator +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + + +@pyplugs.register +def draw_random_integer(rng: RNGenerator, low: int = 0, high: int = 2**31 - 1) -> int: + """Returns a random integer from `low` (inclusive) to `high` (exclusive). + + The integer is sampled from a uniform distribution. + + Args: + rng: A random number generator returned by :py:func:`~.rng.init_rng`. + low: Lowest (signed) integers to be drawn from the distribution (unless + `high=None`, in which case this parameter is `0` and this value is used for + `high`). + high: If not `None`, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if `high=None`) + + Returns: + A random integer. + + See Also: + - :py:meth:`numpy.random.Generator.integers` + """ + result: int = int(rng.integers(low=low, high=high)) + + return result + + +@pyplugs.register +def draw_random_integers( + rng: RNGenerator, + low: int = 0, + high: int = 2**31 - 1, + size: Optional[Union[int, Tuple[int, ...]]] = None, +) -> np.ndarray: + """Returns random integers from `low` (inclusive) to `high` (exclusive). + + The integers are sampled from a uniform distribution. + + Args: + rng: A random number generator returned by :py:func:`~.rng.init_rng`. 
+ low: Lowest (signed) integers to be drawn from the distribution (unless + `high=None`, in which case this parameter is `0` and this value is used for + `high`). + high: If not `None`, one above the largest (signed) integer to be drawn from the + distribution (see above for behavior if `high=None`). + size: The output shape of array. If the given shape is, e.g., `(m, n, k)`, then + `m * n * k` samples are drawn. If `None`, a single value is returned. The + default is `None`. + + Returns: + A `size`-shaped array of random integers. + + See Also: + - :py:meth:`numpy.random.Generator.integers` + """ + size = size or 1 + result: np.ndarray = rng.integers(low=low, high=high, size=size) + + return result diff --git a/examples/task-plugins/dioptra_custom/vc/registry_art.py b/examples/task-plugins/dioptra_custom/vc/registry_art.py new file mode 100644 index 000000000..7286cf002 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/registry_art.py @@ -0,0 +1,107 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""A task plugin module for interfacing the |ART| with the MLFlow model registry. + +.. 
|ART| replace:: `Adversarial Robustness Toolbox\
+    <https://adversarial-robustness-toolbox.readthedocs.io/en/latest>`__
+"""
+
+from __future__ import annotations
+
+from typing import Any, Dict, Optional
+
+import numpy as np
+import structlog
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError
+from dioptra.sdk.utilities.decorators import require_package
+
+from .registry_mlflow import load_tensorflow_keras_classifier
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+try:
+    from art.estimators.classification import TensorFlowV2Classifier
+
+except ImportError:  # pragma: nocover
+    LOGGER.warn(
+        "Unable to import one or more optional packages, functionality may be reduced",
+        package="art",
+    )
+
+
+try:
+    from tensorflow.keras import losses
+    from tensorflow.keras.models import Sequential
+
+except ImportError:  # pragma: nocover
+    LOGGER.warn(
+        "Unable to import one or more optional packages, functionality may be reduced",
+        package="tensorflow",
+    )
+
+
+@pyplugs.register
+@require_package("art", exc_type=ARTDependencyError)
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def load_wrapped_tensorflow_keras_classifier(
+    artifact_uri: str,
+    imagenet_preprocessing: bool = False,
+    classifier_kwargs: Optional[Dict[str, Any]] = None,
+) -> TensorFlowV2Classifier:
+    """Loads and wraps a registered Keras classifier for compatibility with the |ART|.
+
+    Args:
+        artifact_uri: The MLFlow artifact URI of the logged Keras model to load.
+        imagenet_preprocessing: If `True`, applies the ImageNet channel-mean
+            preprocessing expected by ImageNet-pretrained models. The default is
+            `False`.
+        classifier_kwargs: A dictionary mapping argument names to values which will
+            be passed to the TensorFlowV2Classifier constructor.
+
+    Returns:
+        A trained :py:class:`~art.estimators.classification.TensorFlowV2Classifier`
+        object.
+
+    See Also:
+        - :py:class:`art.estimators.classification.TensorFlowV2Classifier`
+        - :py:func:`.registry_mlflow.load_tensorflow_keras_classifier`
+    """
+    classifier_kwargs = classifier_kwargs or {}
+    keras_classifier: Sequential = load_tensorflow_keras_classifier(
+        uri=artifact_uri
+    )
+    nb_classes = keras_classifier.output_shape[1]
+    input_shape = keras_classifier.input_shape
+    loss_object = losses.get(keras_classifier.loss)
+    preprocessing = (
+        (np.array([103.939, 116.779, 123.680]), np.array([1.0, 1.0, 1.0]))
+        if imagenet_preprocessing
+        else None
+    )
+    wrapped_keras_classifier: TensorFlowV2Classifier = TensorFlowV2Classifier(
+        model=keras_classifier,
+        nb_classes=nb_classes,
+        input_shape=input_shape,
+        loss_object=loss_object,
+        preprocessing=preprocessing,
+        **classifier_kwargs,
+    )
+    LOGGER.info(
+        "Wrap Keras classifier for compatibility with Adversarial Robustness Toolbox"
+    )
+
+    return wrapped_keras_classifier
diff --git a/examples/task-plugins/dioptra_custom/vc/registry_mlflow.py b/examples/task-plugins/dioptra_custom/vc/registry_mlflow.py
new file mode 100644
index 000000000..23d8519aa
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/vc/registry_mlflow.py
@@ -0,0 +1,120 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for using the MLFlow model registry."""
+
+from __future__ import annotations
+
+from typing import Optional
+
+import mlflow
+import structlog
+from mlflow.entities import Run as MlflowRun
+from mlflow.entities.model_registry import ModelVersion
+from mlflow.tracking import MlflowClient
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+from dioptra.sdk.exceptions import TensorflowDependencyError
+from dioptra.sdk.utilities.decorators import require_package
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+try:
+    from tensorflow.keras.models import Sequential
+
+except ImportError:  # pragma: nocover
+    LOGGER.warn(
+        "Unable to import one or more optional packages, functionality may be reduced",
+        package="tensorflow",
+    )
+
+
+@pyplugs.register
+def add_model_to_registry(
+    active_run: MlflowRun, name: str, model_dir: str
+) -> Optional[ModelVersion]:
+    """Registers a trained model logged during the current run to the MLFlow registry.
+
+    Args:
+        active_run: The :py:class:`mlflow.ActiveRun` object managing the current
+            run's state.
+        name: The registration name to use for the model.
+        model_dir: The relative artifact directory where MLFlow logged the model
+            trained during the current run.
+
+    Returns:
+        A :py:class:`~mlflow.entities.model_registry.ModelVersion` object created
+        by the backend.
+    """
+    if not name.strip():
+        return None
+
+    run_id: str = active_run.info.run_id
+    artifact_uri: str = active_run.info.artifact_uri
+    source: str = f"{artifact_uri}/{model_dir}"
+
+    registered_models = [x.name for x in MlflowClient().search_registered_models()]
+
+    if name not in registered_models:
+        LOGGER.info("create registered model", name=name)
+        MlflowClient().create_registered_model(name=name)
+
+    LOGGER.info("create model version", name=name, source=source, run_id=run_id)
+    model_version: ModelVersion = MlflowClient().create_model_version(
+        name=name, source=source, run_id=run_id
+    )
+
+    return model_version
+
+
+@pyplugs.register
+def get_experiment_name(active_run: MlflowRun) -> str:
+    """Gets the name of the experiment for the current run.
+
+    Args:
+        active_run: The :py:class:`mlflow.ActiveRun` object managing the current
+            run's state.
+
+    Returns:
+        The name of the experiment.
+    """
+    experiment_name: str = (
+        MlflowClient().get_experiment(active_run.info.experiment_id).name
+    )
+    LOGGER.info(
+        "Obtained experiment name of active run", experiment_name=experiment_name
+    )
+
+    return experiment_name
+
+
+@pyplugs.register
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def load_tensorflow_keras_classifier(uri: str) -> Sequential:
+    """Loads a registered Keras classifier.
+
+    Args:
+        uri: The MLFlow model URI (for example, an artifact or `models:/` URI) of
+            the registered Keras classifier to load.
+
+    Returns:
+        A trained :py:class:`tf.keras.Sequential` object.
+    """
+    LOGGER.info("Load Keras classifier from model registry", uri=uri)
+
+    return mlflow.keras.load_model(model_uri=uri)
+
\ No newline at end of file
diff --git a/examples/task-plugins/dioptra_custom/vc/tensorflow.py b/examples/task-plugins/dioptra_custom/vc/tensorflow.py
new file mode 100644
index 000000000..1d640e2c1
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/vc/tensorflow.py
@@ -0,0 +1,112 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+from __future__ import annotations
+
+from types import FunctionType
+from typing import Any, Dict, List, Union
+
+import structlog
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+from dioptra.sdk.exceptions import TensorflowDependencyError
+from dioptra.sdk.utilities.decorators import require_package
+
+from . import import_keras
import import_keras + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from tensorflow.keras.callbacks import Callback + from tensorflow.keras.metrics import Metric + from tensorflow.keras.optimizers import Optimizer + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def evaluate_metrics_tensorflow(classifier, dataset) -> Dict[str, float]: + result = classifier.evaluate(dataset, verbose=0) + return dict(zip(classifier.metrics_names, result)) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_optimizer(optimizer: str, learning_rate: float) -> Optimizer: + return import_keras.get_optimizer(optimizer)(learning_rate) + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_model_callbacks(callbacks_list: List[Dict[str, Any]]) -> List[Callback]: + return [ + import_keras.get_callback(callback["name"])(**callback.get("parameters", {})) + for callback in callbacks_list + ] + + +@pyplugs.register +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def get_performance_metrics( + metrics_list: List[Dict[str, Any]] +) -> List[Union[Metric, FunctionType]]: + performance_metrics: List[Metric] = [] + + for metric in metrics_list: + new_metric: Union[Metric, FunctionType] = import_keras.get_metric( + metric["name"] + ) + performance_metrics.append( + new_metric(**metric.get("parameters")) + if not isinstance(new_metric, FunctionType) and metric.get("parameters") + else new_metric + ) + + return performance_metrics + +@pyplugs.register +def process_int_list(arg: str): + lst = arg.replace('[','').replace(']', '').replace(' ','') + lst = list(map(lambda x: int(x), lst.split(','))) + return lst + +@pyplugs.register +def process_float_list(arg: str): + lst = arg.replace('[','').replace(']', '').replace(' ','') + lst = list(map(lambda x: float(x), lst.split(','))) + return lst + +@pyplugs.register +def process_float(arg: str): + return float(arg) + +@pyplugs.register +def process_int(arg: str): + return int(arg) + +@pyplugs.register +def process_bool(arg: str): + return bool(arg) + +@pyplugs.register +def get_none(arg: str): + return None \ No newline at end of file diff --git a/examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py b/examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py new file mode 100644 index 000000000..f7f878cd5 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py @@ -0,0 +1,99 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for using the MLFlow Tracking service."""
+
+from __future__ import annotations
+
+from typing import Dict
+
+import mlflow
+import structlog
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+from dioptra.sdk.exceptions import TensorflowDependencyError
+from dioptra.sdk.utilities.decorators import require_package
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+try:
+    from tensorflow.keras.models import Sequential
+
+except ImportError:  # pragma: nocover
+    LOGGER.warn(
+        "Unable to import one or more optional packages, functionality may be reduced",
+        package="tensorflow",
+    )
+
+
+@pyplugs.register
+def log_metrics(metrics: Dict[str, float]) -> None:
+    """Logs metrics to the MLFlow Tracking service for the current run.
+
+    Args:
+        metrics: A dictionary with the metrics to be logged. The keys are the metric
+            names and the values are the metric values.
+
+    See Also:
+        - :py:func:`mlflow.log_metric`
+    """
+    for metric_name, metric_value in metrics.items():
+        mlflow.log_metric(key=metric_name, value=metric_value)
+        LOGGER.info(
+            "Log metric to MLFlow Tracking server",
+            metric_name=metric_name,
+            metric_value=metric_value,
+        )
+
+
+@pyplugs.register
+def log_parameters(parameters: Dict[str, float]) -> None:
+    """Logs parameters to the MLFlow Tracking service for the current run.
+
+    Parameters can only be set once per run.
+
+    Args:
+        parameters: A dictionary with the parameters to be logged. The keys are the
+            parameter names and the values are the parameter values.
+
+    See Also:
+        - :py:func:`mlflow.log_param`
+    """
+    for parameter_name, parameter_value in parameters.items():
+        mlflow.log_param(key=parameter_name, value=parameter_value)
+        LOGGER.info(
+            "Log parameter to MLFlow Tracking server",
+            parameter_name=parameter_name,
+            parameter_value=parameter_value,
+        )
+
+
+@pyplugs.register
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def log_tensorflow_keras_estimator(estimator: Sequential, model_dir: str) -> None:
+    """Logs a Keras estimator trained during the current run as an MLFlow artifact.
+
+    Args:
+        estimator: A trained Keras estimator.
+        model_dir: The relative artifact directory where MLFlow should save the
+            model.
+    """
+    mlflow.keras.log_model(model=estimator, artifact_path=model_dir)
+    LOGGER.info(
+        "Tensorflow Keras model logged to tracking server",
+        model_dir=model_dir,
+    )
diff --git a/examples/v1-client-tensorflow-mnist-classifier/README.md b/examples/v1-client-tensorflow-mnist-classifier/README.md
new file mode 100644
index 000000000..55340bbdc
--- /dev/null
+++ b/examples/v1-client-tensorflow-mnist-classifier/README.md
@@ -0,0 +1,22 @@
+# Tensorflow MNIST Classifier demo
+
+This example demonstrates how to run a simple experiment that applies the fast gradient method (FGM) evasion attack against a neural network classifier trained on MNIST.
+The demo can be found in the Jupyter notebook file [demo.ipynb](demo.ipynb).
+
+## Running the example
+
+To prepare your environment for running this example, follow the linked instructions below:
+
+1. [Create and activate a Python virtual environment and install the necessary dependencies](../README.md#creating-a-virtual-environment)
+2. [Download the MNIST dataset using the download_data.py script.](../README.md#downloading-datasets)
+3. [Follow the links in these User Setup instructions](../../README.md#user-setup) to do the following:
+   - Build the containers
+   - Use the cookiecutter template to generate the scripts, configuration files, and Docker Compose files you will need to run Dioptra
+4. [Edit the docker-compose.yml file to mount the data folder in the worker containers](../README.md#mounting-the-data-folder-in-the-worker-containers)
+5. [Initialize and start Dioptra](https://pages.nist.gov/dioptra/getting-started/running-dioptra.html#initializing-the-deployment)
+6. [Register the custom task plugins for Dioptra's examples and demos](../README.md#registering-custom-task-plugins)
+7. [Register the queues for Dioptra's examples and demos](../README.md#registering-queues)
+8. [Start JupyterLab and open `demo.ipynb`](../README.md#starting-jupyter-lab)
+
+Steps 1–4 and 6–7 only need to be run once.
+**Returning users only need to repeat Steps 5 (if you stopped Dioptra using `docker compose down`) and 8 (if you stopped the `jupyter lab` process)**.
diff --git a/examples/v1-client-tensorflow-mnist-classifier/demo.ipynb b/examples/v1-client-tensorflow-mnist-classifier/demo.ipynb
new file mode 100644
index 000000000..65b606d50
--- /dev/null
+++ b/examples/v1-client-tensorflow-mnist-classifier/demo.ipynb
@@ -0,0 +1,669 @@
+{
+ "cells": [
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "# Tensorflow MNIST Classifier demo"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "This notebook contains an end-to-end demonstration of Dioptra that can be run on any modern laptop.\n",
+    "Please see the [example README](README.md) for instructions on how to prepare your environment for running this example."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Setup"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Below we import the necessary Python modules and ensure the proper environment variables are set so that all the code blocks will work as expected."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "EXPERIMENT_NAME = \"mnist_fgm\"\n",
+    "EXPERIMENT_DESC = \"applying the fast gradient method (FGM) attack to a classifier trained on MNIST\"\n",
+    "QUEUE_NAME = 'tensorflow_cpu'\n",
+    "QUEUE_DESC = 'Tensorflow CPU Queue'\n",
+    "PLUGIN_FILES = '../task-plugins/dioptra_custom/vc/'\n",
+    "MODEL_NAME = \"mnist_classifier\"\n",
+    "\n",
+    "# Default address for accessing the RESTful API service\n",
+    "RESTAPI_ADDRESS = \"http://localhost:20080\"\n",
+    "\n",
+    "# Default address for accessing the MLFlow Tracking server\n",
+    "MLFLOW_TRACKING_URI = \"http://localhost:35000\""
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "# Import packages from the Python standard library\n",
+    "import importlib.util\n",
+    "import os\n",
+    "import sys\n",
+    "import pprint\n",
+    "import time\n",
+    "import warnings\n",
+    "from pathlib import Path\n",
+    "from IPython.display import display, clear_output\n",
+    "import logging\n",
+    "import structlog\n",
+    "import yaml\n",
+    "\n",
+    "# Filter out warning messages\n",
+    "warnings.filterwarnings(\"ignore\")\n",
+    "structlog.configure(\n",
+    "    wrapper_class=structlog.make_filtering_bound_logger(logging.CRITICAL),\n",
+    ")\n",
+    "\n",
+    "def register_python_source_file(module_name: str, filepath: Path) -> None:\n",
+    "    \"\"\"Import a source file directly.\n",
+    "\n",
+    "    Args:\n",
+    "        module_name: The module name to associate with the imported source file.\n",
+    "        filepath: The path to the source file.\n",
+    "\n",
+    "    Notes:\n",
+    "        Adapted from the following implementation in the Python documentation:\n",
+    "        https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n",
+    "    \"\"\"\n",
+    "    spec = importlib.util.spec_from_file_location(module_name, str(filepath))\n",
+    "    module = importlib.util.module_from_spec(spec)\n",
+    "    sys.modules[module_name] = module\n",
+    "    spec.loader.exec_module(module)\n",
+    "\n",
+    "# Register the examples/scripts directory as a Python module\n",
+    "register_python_source_file(\"scripts\", Path(\"..\", \"scripts\", \"__init__.py\"))\n",
+    "\n",
+    "from scripts.client import DioptraClient\n",
+    "from scripts.utils import make_tar\n",
+    "\n",
+    "# Set DIOPTRA_RESTAPI_URI variable if not defined, used to connect to RESTful API service\n",
+    "if os.getenv(\"DIOPTRA_RESTAPI_URI\") is None:\n",
+    "    os.environ[\"DIOPTRA_RESTAPI_URI\"] = RESTAPI_ADDRESS\n",
+    "\n",
+    "# Set MLFLOW_TRACKING_URI variable, used to connect to MLFlow Tracking service\n",
+    "if os.getenv(\"MLFLOW_TRACKING_URI\") is None:\n",
+    "    os.environ[\"MLFLOW_TRACKING_URI\"] = MLFLOW_TRACKING_URI"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Dataset"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "We obtained a copy of the MNIST dataset when we ran the `download_data.py` script. If you have not done so already, see [How to Obtain Common Datasets](https://pages.nist.gov/dioptra/getting-started/acquiring-datasets.html).\n",
+    "The training and testing images for the MNIST dataset are stored within the `/dioptra/data/Mnist` directory as PNG files organized into the following folder structure:"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "    Mnist\n",
+    "    ├── testing\n",
+    "    │   ├── 0\n",
+    "    │   ├── 1\n",
+    "    │   ├── 2\n",
+    "    │   ├── 3\n",
+    "    │   ├── 4\n",
+    "    │   ├── 5\n",
+    "    │   ├── 6\n",
+    "    │   ├── 7\n",
+    "    │   ├── 8\n",
+    "    │   └── 9\n",
+    "    └── training\n",
+    "        ├── 0\n",
+    "        ├── 1\n",
+    "        ├── 2\n",
+    "        ├── 3\n",
+    "        ├── 4\n",
+    "        ├── 5\n",
+    "        ├── 6\n",
+    "        ├── 7\n",
+    "        ├── 8\n",
+    "        └── 9"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The subfolders under `training/` and `testing/` are the classification labels for the images in the dataset.\n",
+    "This folder structure is a standardized way to encode the label information, and many libraries can make use of it, including the Tensorflow library that we are using for this particular demo."
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "## Submit and run jobs"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "To connect to the Dioptra RESTful API, we will use the client class defined in the `examples/scripts/client.py` file, which communicates with the API over HTTP.\n",
+    "We create the client below.\n",
+    "The client uses the environment variable `DIOPTRA_RESTAPI_URI`, which we configured at the top of the notebook, to determine how to connect to the Dioptra RESTful API."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "tags": []
+   },
+   "outputs": [],
+   "source": [
+    "client = DioptraClient()"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "You must log in to the REST API before performing any other operations. Here we create a user if one does not already exist, and log in with it."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "try:\n",
+    "    client.users.create('pluginuser','pluginuser@dioptra.nccoe.nist.gov','pleasemakesuretoPLUGINthecomputer','pleasemakesuretoPLUGINthecomputer')\n",
+    "except Exception:\n",
+    "    pass  # ignore the error if the user already exists\n",
+    "client.auth.login('pluginuser','pleasemakesuretoPLUGINthecomputer')"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "The following function can be used to clear all experiments, entrypoints, jobs, models, plugins, tags, and queues in the database if a fresh start is desired. It is not called anywhere in this notebook, but is included as a convenience.",
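+    "\n",
+    "\n",
+    "The same paginated list-then-delete pattern also works for a single resource type on its own. A minimal sketch (hypothetical usage; it assumes the client above is already logged in):\n",
+    "\n",
+    "```python\n",
+    "# Remove every registered queue. get_all returns a paginated envelope whose\n",
+    "# 'data' list holds the records, each carrying the 'id' used by delete_by_id.\n",
+    "for queue in client.queues.get_all(pageLength=100000)['data']:\n",
+    "    client.queues.delete_by_id(queue['id'])\n",
+    "```"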
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def delete_all():\n", + " for d in client.experiments.get_all(pageLength=100000)['data']:\n", + " client.experiments.delete_by_id(d['id'])\n", + " for d in client.entrypoints.get_all(pageLength=100000)['data']:\n", + " client.entrypoints.delete_by_id(d['id'])\n", + " for d in client.jobs.get_all(pageLength=100000)['data']:\n", + " client.jobs.delete_by_id(d['id'])\n", + " for d in client.models.get_all(pageLength=100000)['data']:\n", + " client.models.delete_by_id(d['id'])\n", + " for d in client.plugins.get_all(pageLength=100000)['data']:\n", + " try:\n", + " client.plugins.delete_by_id(d['id'])\n", + " except:\n", + " pass\n", + " for d in client.tags.get_all(pageLength=100000)['data']:\n", + " client.tags.delete_by_id(d['id'])\n", + " for d in client.pluginParameterTypes.get_all(pageLength=100000)['data']:\n", + " try:\n", + " client.pluginParameterTypes.delete_by_id(d['id'])\n", + " except:\n", + " pass\n", + " for d in client.queues.get_all(pageLength=100000)['data']:\n", + " client.queues.delete_by_id(d['id'])" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "The following functions are used for registering plugins located in the `../examples/task-plugins/` folder, associating them with endpoints in the ./src/ folder, and then associating those endpoints with an experiment. When `run_experiment` is called, it will create plugins based on the YML files provided, and upload any additional files in the directory specified by `PLUGIN_FILES` at the top of the notebook." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true, + "tags": [] + }, + "outputs": [], + "source": [ + "basic_types = ['integer', 'string', 'number', 'any', 'boolean', 'null']\n", + "\n", + "def create_or_get_experiment(group, name, description, entrypoints):\n", + " found = None\n", + " for exp in client.experiments.get_all(search=name,pageLength=100000)['data']:\n", + " if exp['name'] == name:\n", + " found = exp\n", + " if (found != None):\n", + " client.experiments.modify_by_id(found['id'], name, description, entrypoints)\n", + " return found\n", + " else:\n", + " return client.experiments.create(group, name, description, entrypoints)\n", + "def create_or_get_entrypoints(group, name, description, taskGraph, parameters, queues, plugins):\n", + " found = None\n", + " for entrypoint in client.entrypoints.get_all(search=name,pageLength=100000)['data']:\n", + " if entrypoint['name'] == name:\n", + " found = entrypoint\n", + " if (found != None):\n", + " client.entrypoints.modify_by_id(found['id'], name, description, taskGraph, parameters, queues)\n", + " client.entrypoints.add_plugins_by_entrypoint_id(found['id'], plugins)\n", + " return found\n", + " else:\n", + " return client.entrypoints.create(group, name, description, taskGraph, parameters, queues, plugins)\n", + "def create_or_get_plugin_type(group, name, description, structure):\n", + " ret = None\n", + " for pt in client.pluginParameterTypes.get_all(pageLength=100000)['data']:\n", + " if (pt['name'] == name):\n", + " ret = pt\n", + " if (ret is None):\n", + " ret = client.pluginParameterTypes.create(group, name, description, structure)\n", + " return ret\n", + "def find_plugin_type(name, types):\n", + " for t in types.keys():\n", + " if t == name:\n", + " return create_or_get_plugin_type(1, name, name, types[t])['id']\n", + " for t in basic_types:\n", + " if t == name:\n", + " 
return create_or_get_plugin_type(1, name, 'primitive', {})['id']\n", + "\n", + " print(\"Couldn't find type\", name, \"in types definition.\")\n", + "\n", + "def create_or_get_queue(group, name, description):\n", + " ret = None\n", + " for queue in client.queues.get_all(pageLength=100000)['data']:\n", + " if queue['name'] == name:\n", + " ret = queue\n", + " if (ret is None):\n", + " ret = client.queues.create(group, name, description)\n", + " return ret\n", + "def plugin_to_py(plugin):\n", + " return '../task-plugins/' + '/'.join(plugin.split('.')[:-1]) + '.py'\n", + "def create_inputParam_object(inputs, types):\n", + " ret = []\n", + " for inp in inputs:\n", + " if 'name' in inp:\n", + " inp_name = inp['name']\n", + " inp_type = inp['type']\n", + " else:\n", + " inp_name = list(inp.keys())[0]\n", + " inp_type = inp[inp_name]\n", + " if 'required' in inp:\n", + " inp_req = inp['required']\n", + " else:\n", + " inp_req = True\n", + " inp_type = find_plugin_type(inp_type, types)\n", + " ret += [{\n", + " 'name': inp_name,\n", + " 'parameterType': inp_type,\n", + " 'required': inp_req\n", + " }]\n", + " return ret\n", + "def create_outputParam_object(outputs, types):\n", + " ret = []\n", + " for outp in outputs:\n", + " if isinstance(outp, dict):\n", + " outp_name = list(outp.keys())[0]\n", + " outp_type = outp[outp_name]\n", + " else:\n", + " outp_name = outp\n", + " outp_type = outputs[outp_name]\n", + " outp_type = find_plugin_type(outp_type, types)\n", + " ret += [{\n", + " 'name': outp_name,\n", + " 'parameterType': outp_type,\n", + " }]\n", + " return ret\n", + "\n", + "def read_yaml(filename):\n", + " with open(filename) as stream:\n", + " try:\n", + " ret = yaml.safe_load(stream)\n", + " except yaml.YAMLError as exc:\n", + " print(exc)\n", + " return ret\n", + "def register_basic_types(declared):\n", + " for q in basic_types:\n", + " type_def = create_or_get_plugin_type(1, q, 'primitive', {})\n", + " for q in declared:\n", + " type_def = create_or_get_plugin_type(1, q, 'declared', declared[q])\n", + "def get_plugins_to_register(yaml_file, plugins_to_upload=None):\n", + " plugins_to_upload = {} if plugins_to_upload is None else plugins_to_upload\n", + " yaml = read_yaml(yaml_file)\n", + " task_graph = yaml['graph']\n", + " plugins = yaml['tasks']\n", + " types = yaml['types']\n", + " \n", + " register_basic_types(types)\n", + " tasks = []\n", + " for plugin in plugins:\n", + " name = plugin\n", + " definition = plugins[plugin]\n", + " python_file = plugin_to_py(definition['plugin'])\n", + " upload = {}\n", + " upload['name'] = name\n", + " if 'inputs' in definition:\n", + " inputs = definition['inputs']\n", + " upload['inputParams'] = create_inputParam_object(inputs, types)\n", + " else:\n", + " upload['inputParams'] = []\n", + " if 'outputs' in definition:\n", + " outputs = definition['outputs']\n", + " upload['outputParams'] = create_outputParam_object(outputs, types) \n", + " else:\n", + " upload['outputParams'] = []\n", + " if (python_file in plugins_to_upload):\n", + " plugins_to_upload[python_file] += [upload]\n", + " else:\n", + " plugins_to_upload[python_file] = [upload]\n", + " return plugins_to_upload\n", + "def create_or_get_plugin(group, name, description):\n", + " ret = None\n", + " for plugin in client.plugins.get_all(search=name,pageLength=100000)['data']:\n", + " if plugin['name'] == name:\n", + " ret = plugin\n", + " if (ret is None):\n", + " ret = client.plugins.create(group, name, description)\n", + " return ret\n", + "def create_or_modify_plugin_file(plugin_id, 
filename, contents, description, tasks):\n", + " found = None\n", + " for plugin_file in client.plugins.files.get_files_by_plugin_id(plugin_id, pageLength=100000)['data']:\n", + " if plugin_file['filename'] == filename:\n", + " found = plugin_file\n", + " if (found != None):\n", + " return client.plugins.files.modify_files_by_plugin_id_file_id(plugin_id, found['id'], filename, contents, description, tasks)\n", + " else:\n", + " return client.plugins.files.create_files_by_plugin_id(plugin_id, filename, contents, description, tasks)\n", + "def register_plugins(group, plugins_to_upload):\n", + " plugins = []\n", + " for plugin_file in plugins_to_upload.keys():\n", + " plugin_path = Path(plugin_file)\n", + " contents = plugin_path.read_text().replace(\"\\r\", '')\n", + " tasks = plugins_to_upload[plugin_file]\n", + " filename = plugin_path.name\n", + " description = 'custom plugin for ' + filename\n", + " plugin_id = create_or_get_plugin(group, plugin_path.parent.name, description)['id']\n", + " plugins += [plugin_id]\n", + " uploaded_file = create_or_modify_plugin_file(plugin_id, filename, contents, description, tasks)\n", + " return list(set(plugins))\n", + "def create_parameters_object(params, modify):\n", + " ret = []\n", + " type_map = {'int': 'float', 'float':'float', 'string':'string'}\n", + " for p in params:\n", + " if (type(params[p]).__name__ in type_map.keys()):\n", + " paramType = type_map[type(params[p]).__name__]\n", + " paramType='string' # TODO: remove if backend can handle types correctly\n", + " defaultValue = str(params[p])\n", + " else:\n", + " defaultValue = str(params[p])\n", + " paramType = 'string'\n", + "\n", + " if p in modify.keys():\n", + " defaultValue = str(modify[p])\n", + " name = p\n", + " param_obj = {\n", + " 'name': name,\n", + " 'defaultValue': str(defaultValue),\n", + " 'parameterType': paramType\n", + " }\n", + " ret += [param_obj]\n", + " return ret\n", + "def get_graph_for_upload(yaml_text):\n", + " i = 0\n", + " for line in yaml_text:\n", + " if line.startswith(\"graph:\"):\n", + " break\n", + " i += 1\n", + " return ''.join(yaml_text[i+1:])\n", + "def get_parameters_for_upload(yaml_text):\n", + " i = 0\n", + " for line in yaml_text:\n", + " if line.startswith(\"parameters:\"):\n", + " start = i\n", + " if line.startswith(\"tasks:\"):\n", + " break\n", + " i += 1\n", + " return yaml_text[start:i+1]\n", + "def register_entrypoint(group, name, description, queues, plugins, yaml_file, modify_params=None):\n", + " modify_params = {} if modify_params is None else modify_params\n", + " yaml = read_yaml(yaml_file)\n", + " #task_graph = yaml['graph']\n", + " parameters = yaml['parameters']\n", + " \n", + " with open(yaml_file, 'r') as f:\n", + " lines = f.readlines()\n", + " task_graph = get_graph_for_upload(lines).replace('\\r','')\n", + " \n", + " entrypoint = create_or_get_entrypoints(1, name, description, task_graph, create_parameters_object(parameters, modify_params), queues, plugins)\n", + " return entrypoint\n", + "def add_missing_plugin_files(location, upload):\n", + " p = Path(location)\n", + " for child in p.iterdir():\n", + " if (child.name.endswith('.py')):\n", + " if (str(child) not in upload.keys()):\n", + " upload[str(child)] = []\n", + " return upload" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "`run_experiment` uses the helper functions above to do the following tasks:\n", + " - create a queue specified by `QUEUE_NAME` if needed\n", + " - upload the plugins used by the specified `entrypoint` \n", + " - upload 
any other plugin files in the directory `PLUGIN_FILES`\n",
+    " - register the entrypoint in Dioptra\n",
+    " - create the experiment (if needed) and associate the entrypoint with the experiment\n",
+    " - start a job for the specified `entrypoint` on the queue `QUEUE_NAME`\n",
+    "\n",
+    "Note that any parameters passed in to `parameters` will override the defaults in the specified YML file."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def run_experiment(entrypoint, entrypoint_name, entrypoint_desc, job_time_limit, parameters={}):\n",
+    "    upload = get_plugins_to_register(entrypoint, {})\n",
+    "    upload = add_missing_plugin_files(PLUGIN_FILES, upload)\n",
+    "    queue = create_or_get_queue(1, QUEUE_NAME, QUEUE_DESC)\n",
+    "    queues = [queue['id']]\n",
+    "    plugins = register_plugins(1, upload)\n",
+    "    entrypoint = register_entrypoint(1, entrypoint_name, entrypoint_desc, queues, plugins, entrypoint, parameters)\n",
+    "    experiment = create_or_get_experiment(1, EXPERIMENT_NAME, EXPERIMENT_DESC, [entrypoint['id']])\n",
+    "    return client.experiments.create_jobs_by_experiment_id(experiment['id'], entrypoint_desc, queue['id'], entrypoint['id'], {}, job_time_limit)"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "`wait_for_job` blocks until the previous job has finished, which is useful for jobs that depend on the output of other jobs."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {},
+   "outputs": [],
+   "source": [
+    "def wait_for_job(job, job_name):\n",
+    "    n = 0\n",
+    "    while job['status'] != 'finished':\n",
+    "        job = client.jobs.get_by_id(job['id'])\n",
+    "        time.sleep(1)\n",
+    "        clear_output(wait=True)\n",
+    "        display(\"Waiting for job.\" + \".\" * (n % 3))\n",
+    "        n += 1\n",
+    "    clear_output(wait=True)\n",
+    "    display(f\"Job finished. Starting {job_name} job.\")\n",
+    "    "
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Next, we need to train our model. This particular entrypoint uses a LeNet-5 model.\n",
+    "Depending on the specs of your computer, training can take 5-20 minutes or longer to complete.\n",
+    "If you are fortunate enough to have access to a dedicated GPU, the training time will be much shorter."
+   ]
+  },
+  {
+   "cell_type": "code",
+   "execution_count": null,
+   "metadata": {
+    "scrolled": true
+   },
+   "outputs": [],
+   "source": [
+    "entrypoint = 'src/train.yml'\n",
+    "entrypoint_name = 'train'\n",
+    "entrypoint_desc = 'training a classifier on MNIST'\n",
+    "job_time_limit = '1h'\n",
+    "\n",
+    "training_job = run_experiment(entrypoint, \n",
+    "                              entrypoint_name, \n",
+    "                              entrypoint_desc,\n",
+    "                              job_time_limit,\n",
+    "                              {\"epochs_p\": 1})\n"
+   ]
+  },
+  {
+   "cell_type": "markdown",
+   "metadata": {},
+   "source": [
+    "Now that we have trained a model, we will apply the fast gradient method (FGM) evasion attack to it to generate adversarial images.\n",
+    "\n",
+    "This workflow is an example of jobs with dependencies: the metric evaluation job cannot start until the adversarial image generation job has completed, and the adversarial image generation job cannot start until the training job has completed.\n",
+    "\n",
+    "Note that the `training_job` id is needed to tell the FGM attack which model to generate examples against.",
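+    "\n",
+    "\n",
+    "Because each downstream job blocks on its upstream job, a timeout guard can be handy when experimenting. Below is a minimal sketch, assuming only the `client.jobs.get_by_id` call and the `status` field that `wait_for_job` above already relies on:\n",
+    "\n",
+    "```python\n",
+    "def wait_for_job_with_timeout(job, timeout_s=3600):\n",
+    "    # Poll the job document until it reports 'finished', failing after timeout_s.\n",
+    "    deadline = time.time() + timeout_s\n",
+    "    while job['status'] != 'finished':\n",
+    "        if time.time() > deadline:\n",
+    "            raise TimeoutError(f\"job {job['id']} did not finish in {timeout_s}s\")\n",
+    "        time.sleep(1)\n",
+    "        job = client.jobs.get_by_id(job['id'])\n",
+    "    return job\n",
+    "```"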
+ ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "entrypoint = 'src/fgm.yml'\n", + "entrypoint_name = 'fgm'\n", + "entrypoint_desc = 'generating examples on mnist_classifier using the fgm attack'\n", + "job_time_limit = '1h'\n", + "\n", + "wait_for_job(training_job, entrypoint_name)\n", + "fgm_job = run_experiment(entrypoint,\n", + " entrypoint_name,\n", + " entrypoint_desc,\n", + " job_time_limit,\n", + " {\"training_job_id\": training_job['id']})\n" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "Finally, we can test out the results of our adversarial attack on the model we trained earlier. This will wait for the FGM job to finish, and then evaluate the model's performance on the adversarial examples. Note that we need to know both the `fgm_job` id as well as the `training_job` id, so that this entrypoint knows which run's adversarial examples to test against which model. \n", + "\n", + "The previous runs are all stored in Dioptra as well, so you can always go back later and retrieve examples, models, and even the code used to create them." + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "entrypoint = 'src/infer.yml'\n", + "entrypoint_name = 'infer'\n", + "entrypoint_desc = 'evaluating performance of mnist_classifier on generated fgm examples'\n", + "job_time_limit = '1h'\n", + "\n", + "wait_for_job(fgm_job, entrypoint_name)\n", + "infer_job = run_experiment(entrypoint, \n", + " entrypoint_name,\n", + " entrypoint_desc,\n", + " job_time_limit,\n", + " {\"fgm_job_id\": fgm_job['id'], \"training_job_id\": training_job['id']})\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "from mlflow.tracking import MlflowClient\n", + "from uuid import UUID\n", + "mlflow_client = MlflowClient()\n", + "mlflow_runid = UUID(client.jobs.get_mlflow_run_id(infer_job['id'])['mlflowRunId']).hex\n", + "mlflow_run = mlflow_client.get_run(mlflow_runid)\n", + "pprint.pprint(mlflow_run.data.metrics)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [] + } + ], + "metadata": { + "interpreter": { + "hash": "edee40310913f16e2ca02c1d37887bcb7f07f00399ca119bb7e27de7d632ea99" + }, + "kernelspec": { + "display_name": "Python 3 (ipykernel)", + "language": "python", + "name": "python3" + }, + "language_info": { + "codemirror_mode": { + "name": "ipython", + "version": 3 + }, + "file_extension": ".py", + "mimetype": "text/x-python", + "name": "python", + "nbconvert_exporter": "python", + "pygments_lexer": "ipython3", + "version": "3.10.13" + } + }, + "nbformat": 4, + "nbformat_minor": 4 +} diff --git a/examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml b/examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml new file mode 100644 index 000000000..8d86b3ded --- /dev/null +++ b/examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml @@ -0,0 +1,320 @@ +types: + rng: + path: + path_string: + union: [string, any] + path_string_null: + union: [path_string, "null"] + dirs: + list: path_string + parameters: + mapping: [string, number] + kwargs: + mapping: [string, any] + kwargs_null: + union: [kwargs, "null"] + keras_classifier: + distance_metric_request: + mapping: [string, string] + distance_metrics_requests: + list: + mapping: [string, string] + distance_metric: + tuple: [string, any] + distance_metrics: + list: + tuple: [string, any] + 
distance_metrics_null: + union: + - list: + tuple: [string, any] + - "null" + dataframe: + image_size: + tuple: [integer, integer, integer] + clip_values: + tuple: [float, float, float] + norm: + union: [integer, number, string] + +parameters: + data_dir: /dioptra/data/Mnist/testing + image_size_p: [28, 28, 1] + adv_tar_name: testing_adversarial_fgm.tar.gz + adv_data_dir: adv_testing + distance_metrics_filename: distance_metrics.csv + training_job_id: + clip_values_p: [0, 1] + batch_size_p: 32 + eps_p: 0.3 + eps_step_p: 0.1 + minimal_p: false + norm: "inf" + seed_p: -1 + +tasks: + init_rng: + plugin: dioptra_custom.vc.random_rng.init_rng + inputs: + - name: seed + type: integer + required: false + outputs: + - ret1: integer + - ret2: rng + + draw_random_integer: + plugin: dioptra_custom.vc.random_sample.draw_random_integer + inputs: + - rng: rng + - name: low + type: integer + required: false + - name: high + type: integer + required: false + outputs: + value: integer + + init_tensorflow: + plugin: dioptra_custom.vc.backend_configs_tensorflow.init_tensorflow + inputs: + - seed: integer + + make_directories: + plugin: dioptra_custom.vc.artifacts_utils.make_directories + inputs: + - dirs: dirs + + log_parameters: + plugin: dioptra_custom.vc.tracking_mlflow.log_parameters + inputs: + - parameters: parameters + + load_wrapped_tensorflow_keras_classifier: + plugin: dioptra_custom.vc.registry_art.load_wrapped_tensorflow_keras_classifier + inputs: + - artifact_uri: string + - name: classifier_kwargs + type: kwargs + required: false + outputs: + classifier: keras_classifier + + get_distance_metric_list: + plugin: dioptra_custom.vc.metrics_distance.get_distance_metric_list + inputs: + - request: distance_metrics_requests + outputs: + distance_metrics_list: distance_metrics + + create_adversarial_fgm_dataset: + plugin: dioptra_custom.vc.attacks_fgm.create_adversarial_fgm_dataset + inputs: + - data_dir: string + - adv_data_dir: path_string + - keras_classifier: keras_classifier + - image_size: image_size + - name: distance_metrics_list + type: distance_metrics_null + required: false + - name: rescale + type: number + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: eps + type: number + required: false + - name: eps_step + type: number + required: false + - name: minimal + type: boolean + required: false + - name: norm + type: norm + required: false + outputs: + dataset: dataframe + + upload_directory_as_tarball_artifact: + plugin: dioptra_custom.vc.artifacts_mlflow.upload_directory_as_tarball_artifact + inputs: + - source_dir: path_string + - tarball_filename: string + - name: tarball_write_mode + type: string + required: false + - name: working_dir + type: path_string_null + required: false + + upload_data_frame_artifact: + plugin: dioptra_custom.vc.artifacts_mlflow.upload_data_frame_artifact + inputs: + - data_frame: dataframe + - file_name: string + - file_format: string + - name: file_format_kwargs + type: kwargs_null + required: false + - name: working_dir + type: path_string_null + required: false + get_uri_for_artifact: + plugin: dioptra_custom.vc.artifacts_restapi.get_uri_for_artifact + inputs: + - job_id: string + - name: index + type: integer + required: false + outputs: + ret: string + get_none: + plugin: dioptra_custom.vc.tensorflow.get_none + inputs: + - arg: string + outputs: + ret: "null" + process_float: + plugin: dioptra_custom.vc.tensorflow.process_float + inputs: + - arg: string + outputs: + ret: 
number + process_bool: + plugin: dioptra_custom.vc.tensorflow.process_bool + inputs: + - arg: string + outputs: + ret: boolean + process_int: + plugin: dioptra_custom.vc.tensorflow.process_int + inputs: + - arg: string + outputs: + ret: integer + process_int_list: + plugin: dioptra_custom.vc.tensorflow.process_int_list + inputs: + - arg: string + outputs: + ret: image_size + process_float_list: + plugin: dioptra_custom.vc.tensorflow.process_float_list + inputs: + - arg: string + outputs: + ret: image_size + +graph: + image_size: + process_int_list: $image_size_p + + clip_values: + process_float_list: $clip_values_p + + batch_size: + process_int: $batch_size_p + + eps: + process_float: $eps_p + + eps_step: + process_float: $eps_step_p + + minimal: + process_bool: $minimal_p + + true_none: + get_none: 'None' + + true_false: + process_bool: 'False' + + seed: + process_int: $seed_p + + init_rng: + init_rng: $seed + + global_seed: + draw_random_integer: + rng: $init_rng.ret2 + + dataset_seed: + draw_random_integer: + rng: $init_rng.ret2 + + init_tensorflow_results: + init_tensorflow: $global_seed + + make_directories_results: + make_directories: [[$adv_data_dir]] + + log_mlflow_params_result: + log_parameters: + - entry_point_seed: $seed + tensorflow_global_seed: $global_seed + dataset_seed: $dataset_seed + + artifact_uri: + get_uri_for_artifact: + job_id: $training_job_id + + keras_classifier: + load_wrapped_tensorflow_keras_classifier: + artifact_uri: $artifact_uri + classifier_kwargs: + clip_values: $clip_values + dependencies: init_tensorflow_results + + distance_metrics: + get_distance_metric_list: + - - name: l_infinity_norm + func: l_inf_norm + - name: l_1_norm + func: l_1_norm + - name: l_2_norm + func: l_2_norm + - name: cosine_similarity + func: paired_cosine_similarities + - name: euclidean_distance + func: paired_euclidean_distances + - name: manhattan_distance + func: paired_manhattan_distances + - name: wasserstein_distance + func: paired_wasserstein_distances + + dataset: + create_adversarial_fgm_dataset: + data_dir: $data_dir + keras_classifier: $keras_classifier + distance_metrics_list: $distance_metrics + adv_data_dir: $adv_data_dir + batch_size: $batch_size + image_size: $image_size + eps: $eps + eps_step: $eps_step + minimal: $minimal + norm: $norm + dependencies: make_directories_results + + upload_directory: + upload_directory_as_tarball_artifact: + - $adv_data_dir + - $adv_tar_name + dependencies: dataset + + upload_dataset: + upload_data_frame_artifact: + data_frame: $dataset + file_name: $distance_metrics_filename + file_format: csv.gz + file_format_kwargs: + index: $true_false diff --git a/examples/v1-client-tensorflow-mnist-classifier/src/infer.yml b/examples/v1-client-tensorflow-mnist-classifier/src/infer.yml new file mode 100644 index 000000000..1bece3443 --- /dev/null +++ b/examples/v1-client-tensorflow-mnist-classifier/src/infer.yml @@ -0,0 +1,255 @@ +types: + rng: + path: + sequential: + number_null: + union: [number, "null"] + string_null: + union: [string, "null"] + path_string: + union: [string, path] + path_string_null: + union: [path_string, "null"] + dirs: + list: path_string + directory_iterator: + parameters: + mapping: [string, number] + kwargs: + mapping: [string, any] + kwargs_null: + union: [kwargs, "null"] + keras_classifier: + eval_metric: + mapping: [string, any] + eval_metric_results: + mapping: [string, number] + dataframe: + image_size: + tuple: [integer, integer, integer] + norm: + union: [integer, number, string] + +parameters: + run_id: "" + 
image_size_p: [28, 28, 1] + training_job_id: + fgm_job_id: + adv_tar_name: testing_adversarial_fgm.tar.gz + adv_data_dir: adv_testing + seed_p: -1 + +tasks: + init_rng: + plugin: dioptra_custom.vc.random_rng.init_rng + inputs: + - name: seed + type: integer + required: false + outputs: + - ret1: integer + - ret2: rng + + draw_random_integer: + plugin: dioptra_custom.vc.random_sample.draw_random_integer + inputs: + - rng: rng + - name: low + type: integer + required: false + - name: high + type: integer + required: false + outputs: + value: integer + + init_tensorflow: + plugin: dioptra_custom.vc.backend_configs_tensorflow.init_tensorflow + inputs: + - seed: integer + + log_parameters: + plugin: dioptra_custom.vc.tracking_mlflow.log_parameters + inputs: + - parameters: parameters + + download_all_artifacts_for_job: + plugin: dioptra_custom.vc.artifacts_mlflow.download_all_artifacts_for_job + inputs: + - job_id: string + - artifact_path: string + - name: destination_path + type: string_null + required: false + outputs: + download_path: string + + extract_tarfile: + plugin: dioptra_custom.vc.artifacts_utils.extract_tarfile + inputs: + - filepath: path_string + - name: tarball_read_mode + type: string + required: false + - name: output_dir + type: any + required: false + + create_image_dataset: + plugin: dioptra_custom.vc.data_tensorflow.create_image_dataset + inputs: + - data_dir: string + - subset: string_null + - image_size: image_size + - seed: integer + - name: rescale + type: number + required: false + - name: validation_split + type: number_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + outputs: + iterator: directory_iterator + + load_tensorflow_keras_classifier: + plugin: dioptra_custom.vc.registry_mlflow.load_tensorflow_keras_classifier + inputs: + - uri: string + outputs: + classifier: sequential + get_uri_for_artifact: + plugin: dioptra_custom.vc.artifacts_restapi.get_uri_for_artifact + inputs: + - job_id: string + - name: index + type: integer + required: false + outputs: + ret: string + + evaluate_metrics_tensorflow: + plugin: dioptra_custom.vc.tensorflow.evaluate_metrics_tensorflow + inputs: + - classifier: any + - dataset: any + outputs: + metrics: eval_metric_results + log_metrics: + plugin: dioptra_custom.vc.tracking_mlflow.log_metrics + inputs: + - metrics: eval_metric_results + get_none: + plugin: dioptra_custom.vc.tensorflow.get_none + inputs: + - arg: string + outputs: + ret: "null" + process_float: + plugin: dioptra_custom.vc.tensorflow.process_float + inputs: + - arg: string + outputs: + ret: number + process_bool: + plugin: dioptra_custom.vc.tensorflow.process_bool + inputs: + - arg: string + outputs: + ret: boolean + process_int: + plugin: dioptra_custom.vc.tensorflow.process_int + inputs: + - arg: string + outputs: + ret: integer + process_int_list: + plugin: dioptra_custom.vc.tensorflow.process_int_list + inputs: + - arg: string + outputs: + ret: image_size + process_float_list: + plugin: dioptra_custom.vc.tensorflow.process_float_list + inputs: + - arg: string + outputs: + ret: image_size + + +graph: + + image_size: + process_int_list: $image_size_p + + seed: + process_int: $seed_p + + init_rng: + init_rng: $seed + + tensorflow_global_seed: + draw_random_integer: + rng: $init_rng.ret2 + + dataset_seed: + draw_random_integer: + rng: $init_rng.ret2 + + init_tensorflow_results: + init_tensorflow: $tensorflow_global_seed + + log_mlflow_params_result: + log_parameters: + - 
entry_point_seed: $seed + tensorflow_global_seed: $tensorflow_global_seed + dataset_seed: $dataset_seed + + adv_tar_path: + download_all_artifacts_for_job: + job_id: $fgm_job_id + artifact_path: $adv_tar_name + + extract_tarfile_results: + extract_tarfile: + filepath: $adv_tar_path + + adv_ds: + create_image_dataset: + data_dir: $adv_data_dir + subset: null + validation_split: null + image_size: $image_size + seed: $dataset_seed + dependencies: + - init_tensorflow_results + - extract_tarfile_results + + model_uri: + get_uri_for_artifact: + job_id: $training_job_id + + classifier: + load_tensorflow_keras_classifier: + uri: $model_uri + dependencies: + - init_tensorflow_results + - adv_ds + + classifier_performance_metrics: + evaluate_metrics_tensorflow: + classifier: $classifier + dataset: $adv_ds + dependencies: + - classifier + + logged_metrics: + log_metrics: + metrics: $classifier_performance_metrics + dependencies: + - classifier_performance_metrics diff --git a/examples/v1-client-tensorflow-mnist-classifier/src/train.yml b/examples/v1-client-tensorflow-mnist-classifier/src/train.yml new file mode 100644 index 000000000..a03403d9f --- /dev/null +++ b/examples/v1-client-tensorflow-mnist-classifier/src/train.yml @@ -0,0 +1,371 @@ +types: + rng: + optimizer: + name_parameters: + mapping: + name: string + parameters: + mapping: [string, any] + metrics_list: + list: + mapping: + name: string + parameters: + mapping: [string, any] + performance_metrics: + metrics: + callbacks_in: + list: + mapping: + name: string + parameters: + mapping: [string, any] + callbacks_out: + mapping: + name: string + parameters: + mapping: [string, any] + directory_iterator: + parameters: + mapping: [string, number] + image_size: + tuple: [integer, integer, integer] + sequential: + fit_kwargs: + mapping: [string, any] + fit_kwargs_null: + union: + - mapping: [string, any] + - "null" + str_null: + union: [string, "null"] + num_null: + union: [number, "null"] + +parameters: + seed_p: -1 + optimizer_name: Adam + learning_rate_p: 0.001 + training_dir: /dioptra/data/Mnist/training + testing_dir: /dioptra/data/Mnist/testing + image_size_p: [28, 28, 1] + validation_split_p: 0.2 + batch_size_p: 32 + model_architecture: le_net + epochs_p: 30 + register_model_name: "mnist_classifier" + +tasks: + init_rng: + plugin: dioptra_custom.vc.random_rng.init_rng + inputs: + - name: seed + type: integer + required: false + outputs: + - ret1: integer + - ret2: rng + + draw_random_integer: + plugin: dioptra_custom.vc.random_sample.draw_random_integer + inputs: + - rng: rng + - name: low + type: integer + required: false + - name: high + type: integer + required: false + outputs: + value: integer + + init_tensorflow: + plugin: dioptra_custom.vc.backend_configs_tensorflow.init_tensorflow + inputs: + - seed: integer + + log_parameters: + plugin: dioptra_custom.vc.tracking_mlflow.log_parameters + inputs: + - parameters: parameters + + get_optimizer: + plugin: dioptra_custom.vc.tensorflow.get_optimizer + inputs: + - name: optimizer + type: string + - learning_rate: number + outputs: + optimizer: optimizer + + get_performance_metrics: + plugin: dioptra_custom.vc.tensorflow.get_performance_metrics + inputs: + - metrics_list: metrics_list + outputs: + performance_metrics: performance_metrics + + get_model_callbacks: + plugin: dioptra_custom.vc.tensorflow.get_model_callbacks + inputs: + - callbacks_list: callbacks_in + outputs: + callbacks: callbacks_out + + create_image_dataset: + plugin: dioptra_custom.vc.data_tensorflow.create_image_dataset 
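+    # Loads the folder-per-class PNG layout described in the notebook's Dataset
+    # section. 'subset' selects the 'training' or 'validation' split (null loads
+    # the whole directory) and 'validation_split' sets the held-out fraction.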
+ inputs: + - data_dir: string + - subset: str_null + - image_size: image_size + - seed: integer + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + outputs: + dataset: directory_iterator + + get_n_classes_from_directory_iterator: + plugin: dioptra_custom.vc.data_tensorflow.get_n_classes_from_directory_iterator + inputs: + - ds: directory_iterator + outputs: + num_classes: integer + + init_classifier: + plugin: dioptra_custom.vc.estimators_keras_classifiers.init_classifier + inputs: + - model_architecture: string + - optimizer: optimizer + - metrics: performance_metrics + - input_shape: image_size + - n_classes: integer + - name: loss + type: string + required: false + outputs: + classifier: sequential + + fit: + plugin: dioptra_custom.vc.estimators_methods.fit + inputs: + - estimator: any + - x: any + - name: y + type: any + required: false + - name: fit_kwargs + type: fit_kwargs_null + required: false + + evaluate_metrics_tensorflow: + plugin: dioptra_custom.vc.tensorflow.evaluate_metrics_tensorflow + inputs: + - classifier: sequential + - dataset: directory_iterator + outputs: + metrics: metrics + + log_metrics: + plugin: dioptra_custom.vc.tracking_mlflow.log_metrics + inputs: + - metrics: metrics + + log_tensorflow_keras_estimator: + plugin: dioptra_custom.vc.tracking_mlflow.log_tensorflow_keras_estimator + inputs: + - estimator: sequential + - model_dir: string + + add_model_to_registry: + plugin: dioptra_custom.vc.mlflow.add_model_to_registry + inputs: + - name: name + type: string + - model_dir: string + get_none: + plugin: dioptra_custom.vc.tensorflow.get_none + inputs: + - arg: string + outputs: + ret: "null" + process_float: + plugin: dioptra_custom.vc.tensorflow.process_float + inputs: + - arg: string + outputs: + ret: number + process_int: + plugin: dioptra_custom.vc.tensorflow.process_int + inputs: + - arg: string + outputs: + ret: integer + process_int_list: + plugin: dioptra_custom.vc.tensorflow.process_int_list + inputs: + - arg: string + outputs: + ret: image_size + +graph: + batch_size: + process_int: $batch_size_p + + epochs: + process_int: $epochs_p + + validation_split: + process_float: $validation_split_p + + true_none: + get_none: 'h' + + image_size: + process_int_list: $image_size_p + + learning_rate: + process_float: $learning_rate_p + + seed: + process_int: $seed_p + + init_rng: + init_rng: $seed + + global_seed: + draw_random_integer: + rng: $init_rng.ret2 + + dataset_seed: + draw_random_integer: + rng: $init_rng.ret2 + + init_tensorflow: + init_tensorflow: $global_seed + + log_params: + log_parameters: + - entry_point_seed: $init_rng.ret1 + tensorflow_global_seed: $global_seed + dataset_seed: $dataset_seed + + optimizer: + get_optimizer: + optimizer: $optimizer_name + learning_rate: $learning_rate + dependencies: + - init_tensorflow + + perf_metrics: + get_performance_metrics: + - - name: CategoricalAccuracy + parameters: { name: accuracy } + - name: Precision + parameters: { name: precision } + - name: Recall + parameters: { name: recall } + - name: AUC + parameters: { name: auc } + dependencies: + - init_tensorflow + + callbacks: + get_model_callbacks: + - - name: EarlyStopping + parameters: + monitor: val_loss + min_delta: .01 + patience: 5 + restore_best_weights: true + dependencies: + - init_tensorflow + + training_dataset: + create_image_dataset: + data_dir: $training_dir + subset: 
training + image_size: $image_size + seed: $dataset_seed + validation_split: $validation_split + batch_size: $batch_size + dependencies: + - init_tensorflow + + validation_dataset: + create_image_dataset: + data_dir: $training_dir + subset: validation + image_size: $image_size + seed: $dataset_seed + validation_split: $validation_split + batch_size: $batch_size + dependencies: + - init_tensorflow + + testing_dataset: + create_image_dataset: + data_dir: $testing_dir + subset: null + image_size: $image_size + seed: $dataset_seed + validation_split: null + batch_size: $batch_size + dependencies: + - init_tensorflow + + num_classes: + get_n_classes_from_directory_iterator: $training_dataset + + classifier: + init_classifier: + model_architecture: $model_architecture + optimizer: $optimizer + metrics: $perf_metrics + input_shape: $image_size + n_classes: $num_classes + dependencies: + - init_tensorflow + + model: + fit: + estimator: $classifier + x: $training_dataset + fit_kwargs: + nb_epochs: $epochs + validation_data: $validation_dataset + callbacks: $callbacks + verbose: 2 + + eval_metrics_tensorflow: + evaluate_metrics_tensorflow: + - $classifier + - $testing_dataset + dependencies: + - model + + log_metrics: + log_metrics: $eval_metrics_tensorflow + + log_keras_estimator: + log_tensorflow_keras_estimator: + - $classifier + - model + dependencies: + - model + + add_model_to_registry: + add_model_to_registry: + - $register_model_name + - model + dependencies: + - log_keras_estimator From 247781cccd7d7787c71cd67f5fbf808ea05932da Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Wed, 18 Sep 2024 13:02:09 -0400 Subject: [PATCH 02/18] examples: remove unneeded files & add defenses --- examples/mnist-classifier-demo/demo.ipynb | 237 +++++-- .../mnist-classifier-demo/src/defense.yml | 212 ++++++ examples/mnist-classifier-demo/src/infer.yml | 12 +- examples/scripts/register_queues.py | 101 +++ examples/scripts/register_task_plugins.py | 212 ++++++ .../defenses_image_preprocessing.py | 15 +- .../feature_squeezing/cw_inf_plugin.py | 2 +- .../feature_squeezing/cw_l2_plugin.py | 2 +- .../feature_squeezing/jsma_plugin.py | 2 +- .../feature_squeezing/squeeze_plugin.py | 2 +- .../defenses_image_preprocessing.py | 32 +- .../dioptra_custom/fgm_mnist_demo/plugins.py | 26 +- .../dioptra_custom/vc/artifacts_exceptions.py | 23 - .../dioptra_custom/vc/artifacts_mlflow.py | 241 ------- .../dioptra_custom/vc/artifacts_restapi.py | 151 ---- .../dioptra_custom/vc/artifacts_utils.py | 117 --- .../dioptra_custom/vc/attacks_fgm.py | 305 -------- .../vc/backend_configs_tensorflow.py | 52 -- .../task-plugins/dioptra_custom/vc/builtin.py | 208 ------ .../dioptra_custom/vc/data_tensorflow.py | 128 ---- .../vc/estimators_keras_classifiers.py | 231 ------ .../dioptra_custom/vc/estimators_methods.py | 122 ---- .../dioptra_custom/vc/import_keras.py | 65 -- .../dioptra_custom/vc/metrics_distance.py | 307 -------- .../dioptra_custom/vc/metrics_exceptions.py | 27 - .../task-plugins/dioptra_custom/vc/mlflow.py | 103 --- .../dioptra_custom/vc/random_rng.py | 56 -- .../dioptra_custom/vc/random_sample.py | 89 --- .../dioptra_custom/vc/registry_art.py | 107 --- .../dioptra_custom/vc/registry_mlflow.py | 120 ---- .../dioptra_custom/vc/tensorflow.py | 112 --- .../dioptra_custom/vc/tracking_mlflow.py | 99 --- .../README.md | 22 - .../demo.ipynb | 669 ------------------ .../src/fgm.yml | 320 --------- .../src/infer.yml | 255 ------- .../src/train.yml | 371 ---------- 37 files changed, 757 
insertions(+), 4398 deletions(-) create mode 100644 examples/mnist-classifier-demo/src/defense.yml create mode 100644 examples/scripts/register_queues.py create mode 100644 examples/scripts/register_task_plugins.py rename examples/task-plugins/dioptra_custom/{vc => fgm_mnist_demo}/defenses_image_preprocessing.py (89%) delete mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/artifacts_utils.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/attacks_fgm.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/builtin.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/data_tensorflow.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/estimators_methods.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/import_keras.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/metrics_distance.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/mlflow.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/random_rng.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/random_sample.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/registry_art.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/registry_mlflow.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/tensorflow.py delete mode 100644 examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py delete mode 100644 examples/v1-client-tensorflow-mnist-classifier/README.md delete mode 100644 examples/v1-client-tensorflow-mnist-classifier/demo.ipynb delete mode 100644 examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml delete mode 100644 examples/v1-client-tensorflow-mnist-classifier/src/infer.yml delete mode 100644 examples/v1-client-tensorflow-mnist-classifier/src/train.yml diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index 12e70a854..8104d0c83 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -31,7 +31,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 80, "metadata": { "tags": [] }, @@ -53,7 +53,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 81, "metadata": {}, "outputs": [], "source": [ @@ -176,7 +176,7 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 131, "metadata": { "tags": [] }, @@ -194,9 +194,28 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 132, "metadata": {}, - "outputs": [], + "outputs": [ + { + "name": "stdout", + "output_type": "stream", + "text": [ + "\u001b[2m2024-09-18 12:52:25\u001b[0m [\u001b[31m\u001b[1merror \u001b[0m] \u001b[1mError code 400 returned. 
\u001b[0m \u001b[36mdata\u001b[0m=\u001b[35m{'username': 'pluginuser', 'email': 'pluginuser@dioptra.nccoe.nist.gov', 'password': 'pleasemakesuretoPLUGINthecomputer', 'confirmPassword': 'pleasemakesuretoPLUGINthecomputer'}\u001b[0m \u001b[36mmethod\u001b[0m=\u001b[35mPOST\u001b[0m \u001b[36mresponse\u001b[0m=\u001b[35m{\"message\": \"Bad Request - The username on the registration form is not available. Please select another and resubmit.\"}\n", + "\u001b[0m \u001b[36murl\u001b[0m=\u001b[35mhttp://localhost:20080/api/v1/users/\u001b[0m\n" + ] + }, + { + "data": { + "text/plain": [ + "{'username': 'pluginuser', 'status': 'Login successful'}" + ] + }, + "execution_count": 132, + "metadata": {}, + "output_type": "execute_result" + } + ], "source": [ "try:\n", " client.users.create('pluginuser','pluginuser@dioptra.nccoe.nist.gov','pleasemakesuretoPLUGINthecomputer','pleasemakesuretoPLUGINthecomputer')\n", @@ -214,20 +233,22 @@ }, { "cell_type": "code", - "execution_count": null, + "execution_count": 154, "metadata": {}, "outputs": [], "source": [ - "def wait_for_job(job, job_name):\n", + "def wait_for_job(job, job_name, quiet=False):\n", " n = 0\n", " while job['status'] != 'finished': \n", " job = client.jobs.get_by_id(job['id'])\n", " time.sleep(1)\n", - " clear_output(wait=True)\n", - " display(\"Waiting for job.\" + \".\" * (n % 3) )\n", + " if not quiet:\n", + " clear_output(wait=True)\n", + " display(\"Waiting for job.\" + \".\" * (n % 3) )\n", " n += 1\n", - " clear_output(wait=True)\n", - " display(f\"Job finished. Starting {job_name} job.\")\n", + " if not quiet:\n", + " clear_output(wait=True)\n", + " display(f\"Job finished. Starting {job_name} job.\")\n", " " ] }, @@ -240,14 +261,15 @@ }, { "cell_type": "code", - "execution_count": 63, + "execution_count": 134, "metadata": {}, "outputs": [], "source": [ "#delete_all(client)\n", "experiment_id, train_ep, queue_id = upload_experiment(client, 'src/train.yml','train','training a classifier on MNIST', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", "experiment_id, fgm_ep, queue_id = upload_experiment(client, 'src/fgm.yml','fgm','generating examples on mnist_classifier using the fgm attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", - "experiment_id, infer_ep, queue_id = upload_experiment(client, 'src/infer.yml','infer','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" + "experiment_id, infer_ep, queue_id = upload_experiment(client, 'src/infer.yml','infer','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" ] }, { @@ -261,7 +283,7 @@ }, { "cell_type": "code", - "execution_count": 64, + "execution_count": 135, "metadata": {}, "outputs": [], "source": [ @@ -290,7 +312,7 @@ }, { "cell_type": "code", - "execution_count": 65, + "execution_count": 137, "metadata": {}, "outputs": [ { @@ -328,7 +350,104 @@ }, { "cell_type": "code", - "execution_count": 66, + "execution_count": 149, + "metadata": {}, + "outputs": [], + "source": [ + "def infer(experiment_id, queue_id, infer_ep, prev_job, job_time_limit='1h', defense=False):\n", + " dd = \"def_testing\" if defense else \"adv_testing\"\n", + " tn = 
\"testing_adversarial_def.tar.gz\" if defense else \"testing_adversarial_fgm.tar.gz\"\n", + " wait_for_job(prev_job, 'infer', quiet=False)\n", + " infer_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id,\n", + " f\"infer job for {experiment_id}\",\n", + " queue_id,\n", + " infer_ep,\n", + " {\"job_id\": str(prev_job['id']),\n", + " \"tar_name\": tn,\n", + " \"data_dir\": dd,\n", + " \"model_name\": MODEL_NAME, \"model_version\": str(-1)},\n", + " job_time_limit\n", + " )\n", + " return infer_job" + ] + }, + { + "cell_type": "code", + "execution_count": 150, + "metadata": {}, + "outputs": [], + "source": [ + "from mlflow.tracking import MlflowClient\n", + "from uuid import UUID\n", + "\n", + "def get_metrics(job):\n", + " wait_for_job(job, 'metrics', quiet=True)\n", + " mlflow_client = MlflowClient()\n", + " mlflow_runid = UUID(client.jobs.get_mlflow_run_id(job['id'])['mlflowRunId']).hex\n", + " mlflow_run = mlflow_client.get_run(mlflow_runid)\n", + " return mlflow_run.data.metrics" + ] + }, + { + "cell_type": "code", + "execution_count": 140, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Job finished. Starting infer job.'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "infer_fgm = infer(experiment_id, queue_id, infer_ep, fgm_job, defense=False)" + ] + }, + { + "cell_type": "code", + "execution_count": 141, + "metadata": {}, + "outputs": [], + "source": [ + "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" + ] + }, + { + "cell_type": "code", + "execution_count": 142, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Job finished. Starting defense job.'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "job_time_limit = '1h'\n", + "wait_for_job(fgm_job, 'defense')\n", + "spatial_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id,\n", + " f\"defense job for {experiment_id}\",\n", + " queue_id,\n", + " defense_ep,\n", + " {\"job_id\": str(fgm_job['id']),\"def_type\":\"spatial_smoothing\"}, # -1 means get the latest\n", + " job_time_limit\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 143, "metadata": {}, "outputs": [ { @@ -341,57 +460,81 @@ "output_type": "display_data" } ], + "source": [ + "infer_spatial = infer(experiment_id, queue_id, infer_ep, spatial_job, defense=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 144, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Job finished. 
Starting defense job.'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], "source": [ "job_time_limit = '1h'\n", - "wait_for_job(fgm_job, 'infer')\n", - "infer_job = client.experiments.create_jobs_by_experiment_id(\n", + "wait_for_job(fgm_job, 'defense')\n", + "jpeg_comp_job = client.experiments.create_jobs_by_experiment_id(\n", " experiment_id,\n", - " f\"infer job for {experiment_id}\",\n", + " f\"defense job for {experiment_id}\",\n", " queue_id,\n", - " infer_ep,\n", - " {\"fgm_job_id\": str(fgm_job['id']), \"model_name\": MODEL_NAME, \"model_version\": str(-1)},\n", + " defense_ep,\n", + " {\n", + " \"job_id\": str(fgm_job['id']),\n", + " \"def_type\":\"jpeg_compression\"\n", + " }, # -1 means get the latest\n", " job_time_limit\n", ")" ] }, { "cell_type": "code", - "execution_count": 70, + "execution_count": 145, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Job finished. Starting infer job.'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "infer_jpeg = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job, defense=True)" + ] + }, + { + "cell_type": "code", + "execution_count": 155, "metadata": {}, "outputs": [ { "name": "stdout", "output_type": "stream", "text": [ - "Training accuracy:\n", - "{'accuracy': 0.9760833382606506,\n", - " 'auc': 0.9990718364715576,\n", - " 'loss': 0.07672422379255295,\n", - " 'precision': 0.9799415469169617,\n", - " 'recall': 0.9721999764442444,\n", - " 'training_time_in_minutes': 0.3090300166666667}\n", - "FGM accuracy:\n", - "{'accuracy': 0.16326121985912323,\n", - " 'auc': 0.6759902238845825,\n", - " 'loss': 2.7856907844543457,\n", - " 'precision': 0.09174499660730362,\n", - " 'recall': 0.044971954077482224}\n" + "{'training_time_in_minutes': 0.32976753333333336, 'accuracy': 0.9775166511535645, 'auc': 0.9987682700157166, 'loss': 0.07407279312610626, 'precision': 0.9809511303901672, 'recall': 0.9750000238418579}\n", + "{'accuracy': 0.11217948794364929, 'auc': 0.6169368028640747, 'precision': 0.09878776967525482, 'loss': 3.25475811958313, 'recall': 0.0546875}\n", + "{'accuracy': 0.11548477411270142, 'auc': 0.6298573613166809, 'loss': 3.010637044906616, 'precision': 0.10013880580663681, 'recall': 0.05058092996478081}\n", + "{'auc': 0.617414653301239, 'precision': 0.12656284868717194, 'accuracy': 0.1341145783662796, 'loss': 2.9532642364501953, 'recall': 0.05779246613383293}\n" ] } ], "source": [ - "from mlflow.tracking import MlflowClient\n", - "from uuid import UUID\n", - "mlflow_client = MlflowClient()\n", - "mlflow_runid = UUID(client.jobs.get_mlflow_run_id(training_job['id'])['mlflowRunId']).hex\n", - "mlflow_run = mlflow_client.get_run(mlflow_runid)\n", - "print(\"Training metrics:\")\n", - "pprint.pprint(mlflow_run.data.metrics)\n", - " \n", - "mlflow_runid = UUID(client.jobs.get_mlflow_run_id(infer_job['id'])['mlflowRunId']).hex\n", - "mlflow_run = mlflow_client.get_run(mlflow_runid)\n", - "print(\"FGM metrics:\")\n", - "pprint.pprint(mlflow_run.data.metrics)" + "print(get_metrics(training_job))\n", + "print(get_metrics(infer_fgm))\n", + "print(get_metrics(infer_jpeg))\n", + "print(get_metrics(infer_spatial))" ] } ], diff --git a/examples/mnist-classifier-demo/src/defense.yml b/examples/mnist-classifier-demo/src/defense.yml new file mode 100644 index 000000000..25aa93fdf --- /dev/null +++ b/examples/mnist-classifier-demo/src/defense.yml @@ -0,0 +1,212 @@ +types: + path: + classifier: + artifact: + model_list: + list: classifier + artifact_list: + list: artifact + 
path_string: + union: [string, path] + list_path_string: + list: path_string + kwargs: + mapping: [string, any] + kwargs_null: + union: [kwargs, "null"] + distance_metric_request: + mapping: [string, string] + distance_metrics_requests: + list: distance_metric_request + image_size: + tuple: [integer, integer, integer] + clip_values: + tuple: [number, number, number] + norm: + union: [integer, number, string] + str_null: + union: [string, "null"] + list_str_null: + list: str_null + num_null: + union: [number, "null"] + directory_iterator: + name_parameters: + mapping: + name: string + parameters: + mapping: [string, any] + metrics_list: + list: name_parameters + +parameters: + data_dir: /dioptra/data/Mnist/testing + image_size: [28, 28, 1] + job_id: + adv_tar_name: testing_adversarial_fgm.tar.gz + adv_data_dir: adv_testing + def_tar_name: testing_adversarial_def.tar.gz + def_data_dir: def_testing + distance_metrics_filename: distance_metrics.csv + model_name: mnist_classifier + model_version: -1 + clip_values: [0, 1] + batch_size: 32 + eps: 0.3 + eps_step: 0.1 + minimal: false + norm: "inf" + seed: -1 + def_type: spatial_smoothing + +tasks: + load_artifacts_for_job: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job + inputs: + - job_id: string + - name: extract_files + type: list_path_string + required: false + load_dataset: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset + inputs: + - name: ep_seed + type: integer + required: false + - name: data_dir + type: string + required: false + - name: subsets + type: list_str_null + required: false + - name: image_size + type: image_size + required: false + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: shuffle + type: boolean + required: false + outputs: + - training: directory_iterator + - validation: directory_iterator + - testing: directory_iterator + + load_model: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_model + inputs: + - name: model_name + type: string + required: false + - name: model_version + type: integer + required: false + - name: imagenet_preprocessing + type: boolean + required: false + - name: art + type: boolean + required: false + - name: classifier_kwargs + type: kwargs + required: false + outputs: + classifier: classifier + + augment_data: + plugin: dioptra_custom.fgm_mnist_demo.plugins.augment_data + inputs: + - dataset: any + - def_data_dir: path_string + - image_size: image_size + - distance_metrics: distance_metrics_requests + - name: batch_size + type: integer + required: false + - name: def_type + type: string + required: false + - name: defense_kwargs + type: kwargs_null + required: false + outputs: + ret: artifact + + + save_artifacts_and_models: + plugin: dioptra_custom.fgm_mnist_demo.plugins.save_artifacts_and_models + inputs: + - name: artifacts + type: artifact_list + required: false + - name: models + type: model_list + required: false + +graph: + load: + load_artifacts_for_job: + job_id: $job_id + extract_files: [$adv_tar_name] + + dataset: + load_dataset: + ep_seed: $seed + data_dir: $adv_data_dir + subsets: [testing] + image_size: $image_size + + model: + load_model: + model_name: $model_name + model_version: $model_version + art: true + classifier_kwargs: + clip_values: $clip_values + + + defended: + augment_data: + dataset: $dataset.testing + def_data_dir: $def_data_dir + image_size: 
$image_size + batch_size: $batch_size + def_type: $def_type + distance_metrics: + - name: l_infinity_norm + func: l_inf_norm + - name: l_1_norm + func: l_1_norm + - name: l_2_norm + func: l_2_norm + - name: cosine_similarity + func: paired_cosine_similarities + - name: euclidean_distance + func: paired_euclidean_distances + - name: manhattan_distance + func: paired_manhattan_distances + - name: wasserstein_distance + func: paired_wasserstein_distances + + save: + save_artifacts_and_models: + artifacts: + - type: tarball + adv_data_dir: $def_data_dir + adv_tar_name: $def_tar_name + - type: dataframe + data_frame: $defended + file_name: $distance_metrics_filename + file_format: csv.gz + file_format_kwargs: + index: false \ No newline at end of file diff --git a/examples/mnist-classifier-demo/src/infer.yml b/examples/mnist-classifier-demo/src/infer.yml index 8e2071edc..bc659e585 100644 --- a/examples/mnist-classifier-demo/src/infer.yml +++ b/examples/mnist-classifier-demo/src/infer.yml @@ -22,9 +22,9 @@ parameters: image_size: [28, 28, 1] model_name: mnist_classifier model_version: -1 - fgm_job_id: - adv_tar_name: testing_adversarial_fgm.tar.gz - adv_data_dir: adv_testing + job_id: + tar_name: testing_adversarial_fgm.tar.gz + data_dir: adv_testing seed: -1 tasks: @@ -100,13 +100,13 @@ tasks: graph: load: load_artifacts_for_job: - job_id: $fgm_job_id - extract_files: [$adv_tar_name] + job_id: $job_id + extract_files: [$tar_name] dataset: load_dataset: ep_seed: $seed - data_dir: $adv_data_dir + data_dir: $data_dir subsets: [testing] image_size: $image_size diff --git a/examples/scripts/register_queues.py b/examples/scripts/register_queues.py new file mode 100644 index 000000000..fbbf4f949 --- /dev/null +++ b/examples/scripts/register_queues.py @@ -0,0 +1,101 @@ +#!/usr/bin/env python +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""Register the queues used in Dioptra's examples and demos. + +Functions: + register_queues: The Click command for registering the queues used in Dioptra's + examples and demos. 
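+
+Example:
+    A minimal sketch of the check-then-register round trip this script performs
+    for each queue name (``get_queue_by_name`` and ``register_queue`` are the
+    ``DioptraClient`` methods this patch relies on; the snippet is illustrative
+    only)::
+
+        client = DioptraClient(address="http://localhost")
+        response = client.get_queue_by_name(name="tensorflow_cpu")
+        if response is None or "Not Found" in response.get("message", []):
+            client.register_queue(name="tensorflow_cpu")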
+""" +from __future__ import annotations + +import click +from rich.console import Console + +# The try/except ImportError blocks allow this script to be invoked using: +# python ./scripts/register_task_plugins.py # OR +# python -m scripts.register_task_plugins +try: + from .client import DioptraClient + +except ImportError: + from client import DioptraClient + +try: + from .utils import RichConsole + +except ImportError: + from utils import RichConsole + +_CONTEXT_SETTINGS = dict( + help_option_names=["-h", "--help"], + show_default=True, +) + + +@click.command(context_settings=_CONTEXT_SETTINGS) +@click.option( + "--queue", + multiple=True, + type=click.STRING, + default=["tensorflow_cpu", "tensorflow_gpu", "pytorch_cpu", "pytorch_gpu"], + help="The queue name to register.", +) +@click.option( + "--api-url", + type=click.STRING, + default="http://localhost", + help="The url to the Dioptra REST API.", +) +def register_queues(queue, api_url): + """Register the queues used in Dioptra's examples and demos.""" + + console = RichConsole(Console()) + client = DioptraClient(address=api_url) + + console.print_title("Dioptra Examples - Register Queues") + console.print_parameter("queue", value=f"[default not bold]{', '.join(queue)}[/]") + console.print_parameter("api_url", value=f"[default not bold]{api_url}[/]") + + for name in queue: + response = client.get_queue_by_name(name=name) + + if response is None or "Not Found" in response.get("message", []): + response = client.register_queue(name=name) + response_after = client.get_queue_by_name(name=name) + + if response_after is None or "Not Found" in response_after.get("message", []): + raise RuntimeError( + f"Failed to register the queue {name!r}. Is the API URL correct?" + ) + + console.print_success( + "[bold green]Success![/] [default not bold]Registered the queue " + f"{name!r}.[/]" + ) + + else: + console.print_info( + f"[bold white]Skipped.[/] [default not bold]The queue {name!r} is " + "already registered.[/]" + ) + + console.print_success("[default no bold]Queue registration is complete.[/]") + + +if __name__ == "__main__": + register_queues() diff --git a/examples/scripts/register_task_plugins.py b/examples/scripts/register_task_plugins.py new file mode 100644 index 000000000..52a319b77 --- /dev/null +++ b/examples/scripts/register_task_plugins.py @@ -0,0 +1,212 @@ +#!/usr/bin/env python +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +"""Register the custom task plugins used in Dioptra's examples and demos. + +Classes: + CustomTaskPlugin: A dictionary containing the name and path to the tarball for + each custom task plugin package. + +Functions: + make_custom_plugins: Create a tarball for each custom task plugin package under a + directory. + upload_custom_plugin_package: Upload a custom task plugin package via the Dioptra + REST API. + delete_custom_plugin_package: Delete a custom task plugin package via the Dioptra + REST API. + register_task_plugins: The Click command for registering the custom task plugins + used in Dioptra's examples and demos. +""" +from __future__ import annotations + +from pathlib import Path +from tempfile import TemporaryDirectory +from typing import Iterable, TypedDict + +import click +from rich.console import Console + +# The try/except ImportError blocks allow this script to be invoked using: +# python ./scripts/register_task_plugins.py # OR +# python -m scripts.register_task_plugins +try: + from .client import DioptraClient + +except ImportError: + from client import DioptraClient + +try: + from .utils import RichConsole, make_tar + +except ImportError: + from utils import RichConsole, make_tar + +_CONTEXT_SETTINGS = dict( + help_option_names=["-h", "--help"], + show_default=True, +) + + +class CustomTaskPlugin(TypedDict): + name: str + path: Path + + +def make_custom_plugins( + plugins_dir: Path, output_dir: Path +) -> Iterable[CustomTaskPlugin]: + """Create a tarball for each custom task plugin package under a directory. + + Args: + plugins_dir: The directory containing the custom task plugin subdirectories. + output_dir: The directory where the tarballs will be saved. + + Yields: + A dictionary containing the name and path to the tarball for each custom task + plugin package. + """ + plugin_packages: list[Path] = [x for x in plugins_dir.glob("*/*") if x.is_dir()] + + for plugin_package in plugin_packages: + plugin_name = plugin_package.name + plugin_path = make_tar( + source_dir=[plugin_package], + tarball_filename=f"custom-plugins-{plugin_name}.tar.gz", + working_dir=output_dir, + ) + yield CustomTaskPlugin(name=plugin_name, path=plugin_path) + + +def upload_custom_plugin_package( + client: DioptraClient, custom_plugin: CustomTaskPlugin +) -> None: + """Upload a custom task plugin package via the Dioptra REST API. + + Args: + client: The Dioptra REST API client. + custom_plugin: A dictionary containing the name and path to the tarball for + the custom task plugin package. + + Raises: + RuntimeError: If the custom task plugin package fails to upload. + """ + response = client.upload_custom_plugin_package( + custom_plugin_name=custom_plugin["name"], + custom_plugin_file=custom_plugin["path"], + ) + response_after = client.get_custom_task_plugin(name=custom_plugin["name"]) + + if response_after is None or "Not Found" in response_after.get("message", []): + raise RuntimeError( + "Failed to register the custom task plugin " + f"{custom_plugin['name']!r}. Is the API URL correct?" + ) + + +def delete_custom_plugin_package( + client: DioptraClient, custom_plugin: CustomTaskPlugin +) -> None: + """Delete a custom task plugin package via the Dioptra REST API. + + Args: + client: The Dioptra REST API client. + custom_plugin: A dictionary containing the name and path to the tarball for + the custom task plugin package. 
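+
+    Example:
+        A hedged sketch of the remove-and-re-register flow behind the
+        ``--force`` option below (assumes an authenticated ``client`` and the
+        module-level imports above; illustrative only)::
+
+            with TemporaryDirectory() as temp_dir:
+                for custom_plugin in make_custom_plugins(
+                    plugins_dir=Path("./task-plugins"), output_dir=Path(temp_dir)
+                ):
+                    delete_custom_plugin_package(client, custom_plugin)
+                    upload_custom_plugin_package(client, custom_plugin)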
+
+    Raises:
+        RuntimeError: If the custom task plugin package fails to delete.
+    """
+    response = client.delete_custom_task_plugin(custom_plugin["name"])
+
+    if response is None or "Success" not in response.get("status", []):
+        raise RuntimeError(
+            "Failed to delete the custom task plugin "
+            f"{custom_plugin['name']!r}. Is the API URL correct?"
+        )
+
+
+@click.command(context_settings=_CONTEXT_SETTINGS)
+@click.option(
+    "--plugins-dir",
+    type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path),
+    default="./task-plugins",
+    help=(
+        "The path to the directory containing the custom task plugin subdirectories."
+    ),
+)
+@click.option(
+    "--api-url",
+    type=click.STRING,
+    default="http://localhost",
+    help="The url to the Dioptra REST API.",
+)
+@click.option(
+    "-f",
+    "--force",
+    type=click.BOOL,
+    is_flag=True,
+    show_default=True,
+    default=False,
+    help="Remove and re-register any existing custom task plugins.",
+)
+def register_task_plugins(plugins_dir, api_url, force):
+    """Register the custom task plugins used in Dioptra's examples and demos."""
+
+    console = RichConsole(Console())
+    client = DioptraClient(address=api_url)
+
+    console.print_title("Dioptra Examples - Register Custom Task Plugins")
+    console.print_parameter("plugins_dir", value=click.format_filename(plugins_dir))
+    console.print_parameter("api_url", value=f"[default not bold]{api_url}[/]")
+    console.print_parameter("force", value=f"{force}")
+
+    with TemporaryDirectory() as temp_dir:
+        custom_plugins = make_custom_plugins(
+            plugins_dir=plugins_dir, output_dir=Path(temp_dir)
+        )
+
+        for custom_plugin in custom_plugins:
+            response = client.get_custom_task_plugin(name=custom_plugin["name"])
+
+            if response is None or "Not Found" in response.get("message", []):
+                upload_custom_plugin_package(client=client, custom_plugin=custom_plugin)
+                console.print_success(
+                    "[bold green]Success![/] [default not bold]Registered the custom "
+                    f"task plugin {custom_plugin['name']!r}.[/]"
+                )
+
+            elif force:
+                delete_custom_plugin_package(client=client, custom_plugin=custom_plugin)
+                upload_custom_plugin_package(client=client, custom_plugin=custom_plugin)
+                console.print_success(
+                    "[bold yellow]Overwritten.[/] [default not bold]Removed and "
+                    f"re-registered the custom task plugin {custom_plugin['name']!r}.[/]"
+                )
+
+            else:
+                console.print_info(
+                    "[bold white]Skipped.[/] [default not bold]The custom task plugin "
+                    f"{custom_plugin['name']!r} is already registered.[/]"
+                )
+
+    console.print_success(
+        "[default not bold]Custom task plugin registration is complete.[/]"
+    )
+
+
+if __name__ == "__main__":
+    register_task_plugins()
diff --git a/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py b/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py
index dafd54a0b..d339167e1 100644
--- a/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py
+++ b/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py
@@ -66,20 +66,17 @@
 @require_package("art", exc_type=ARTDependencyError)
 @require_package("tensorflow", exc_type=TensorflowDependencyError)
 def create_defended_dataset(
+    data_flow: Any,
     data_dir: str,
     def_data_dir: Union[str, Path],
     image_size: Tuple[int, int, int],
     distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None,
     batch_size: int = 32,
-    label_mode: str = "categorical",
     def_type: str = "spatial_smoothing",
     defense_kwargs: Optional[Dict[str, Any]] = None,
 ) -> pd.DataFrame:
     distance_metrics_list = distance_metrics_list or []
-    color_mode: str = "rgb" if image_size[2] == 3 else "grayscale"
-    rescale: float = 1.0 if image_size[2] == 3 else 1.0 / 255
     clip_values: Tuple[float, float] = (0, 255) if image_size[2] == 3 else (0, 1.0)
-    target_size: Tuple[int, int] = image_size[:2]
     def_data_dir = Path(def_data_dir)

     defense = _init_defense(
@@ -88,16 +85,6 @@ def create_defended_dataset(
         defense_kwargs=defense_kwargs,
     )

-    data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale)
-
-    data_flow = data_generator.flow_from_directory(
-        directory=data_dir,
-        target_size=target_size,
-        color_mode=color_mode,
-        class_mode=label_mode,
-        batch_size=batch_size,
-        shuffle=False,
-    )
     num_images = data_flow.n
     img_filenames = [Path(x) for x in data_flow.filenames]
     class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get)
diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py
index 5697aa722..923d10355 100644
--- a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py
+++ b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py
@@ -97,7 +97,7 @@ def create_adversarial_cw_inf_dataset(
         color_mode=color_mode,
         class_mode=label_mode,
         batch_size=batch_size,
-        shuffle=True, # alse,
+        shuffle=True, # false,
     )
     num_images = data_flow.n
     img_filenames = [Path(x) for x in data_flow.filenames]
diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py
index a8986d439..d8a2d9a3a 100644
--- a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py
+++ b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py
@@ -102,7 +102,7 @@ def create_adversarial_cw_l2_dataset(
         color_mode=color_mode,
         class_mode=label_mode,
         batch_size=batch_size,
-        shuffle=True, # alse,
+        shuffle=True, # false,
     )
     num_images = data_flow.n
     img_filenames = [Path(x) for x in data_flow.filenames]
diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py
index bf9940943..bd8ab8709 100644
--- a/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py
+++ b/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py
@@ -93,7 +93,7 @@ def create_adversarial_jsma_dataset(
         color_mode=color_mode,
         class_mode=label_mode,
         batch_size=batch_size,
-        shuffle=True, # alse,
+        shuffle=True, # false,
     )
     num_images = data_flow.n
     img_filenames = [Path(x) for x in data_flow.filenames]
diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py
index f14e83b68..1b0b11536 100644
--- a/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py
+++ b/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py
@@ -84,7 +84,7 @@ def feature_squeeze(
         run_id=run_id,
     )

-    batch_size = 32 # There is currently a bug preventing batch size from getting passsed in correctly
+    batch_size = 32 # There is currently a bug preventing batch size from getting passed in correctly
     tensorflow_global_seed: int = rng.integers(low=0, high=2**31 - 1)
     dataset_seed: int = rng.integers(low=0, high=2**31 - 1)
diff --git 
a/examples/task-plugins/dioptra_custom/vc/defenses_image_preprocessing.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/defenses_image_preprocessing.py similarity index 89% rename from examples/task-plugins/dioptra_custom/vc/defenses_image_preprocessing.py rename to examples/task-plugins/dioptra_custom/fgm_mnist_demo/defenses_image_preprocessing.py index b696dad73..2ceae26bb 100644 --- a/examples/task-plugins/dioptra_custom/vc/defenses_image_preprocessing.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/defenses_image_preprocessing.py @@ -66,40 +66,24 @@ @require_package("art", exc_type=ARTDependencyError) @require_package("tensorflow", exc_type=TensorflowDependencyError) def create_defended_dataset( - data_dir: str, - dataset_name: str, + data_flow: Any, def_data_dir: Union[str, Path], image_size: Tuple[int, int, int], distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, batch_size: int = 32, - label_mode: str = "categorical", def_type: str = "spatial_smoothing", - **kwargs, + defense_kwargs: Optional[Dict[str, Any]] = None, ) -> pd.DataFrame: distance_metrics_list = distance_metrics_list or [] - color_mode: str = "rgb" if image_size[2] == 3 else "grayscale" - rescale: float = 1.0 if image_size[2] == 3 else 1.0 / 255 clip_values: Tuple[float, float] = (0, 255) if image_size[2] == 3 else (0, 1.0) - target_size: Tuple[int, int] = image_size[:2] def_data_dir = Path(def_data_dir) - data_dir = Path(data_dir) / dataset_name - - defense = init_defense( + defense_kwargs = {} if defense_kwargs is None else defense_kwargs + defense = _init_defense( clip_values=clip_values, def_type=def_type, - **kwargs, + defense_kwargs=defense_kwargs, ) - data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale) - - data_flow = data_generator.flow_from_directory( - directory=data_dir, - target_size=target_size, - color_mode=color_mode, - class_mode=label_mode, - batch_size=batch_size, - shuffle=False, - ) num_images = data_flow.n img_filenames = [Path(x) for x in data_flow.filenames] class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get) @@ -149,10 +133,10 @@ def create_defended_dataset( return pd.DataFrame(distance_metrics_) -def init_defense(clip_values, def_type, **kwargs): +def _init_defense(clip_values, def_type, defense_kwargs): defense = DEFENSE_LIST[def_type]( clip_values=clip_values, - **kwargs, + **defense_kwargs, ) return defense @@ -198,4 +182,4 @@ def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> No ) mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) - LOGGER.info("logged distance-based metric", metric_name=metric_name) + LOGGER.info("logged distance-based metric", metric_name=metric_name) \ No newline at end of file diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py index e47cbd34b..08db3cd80 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py @@ -43,6 +43,7 @@ from .metrics_distance import get_distance_metric_list from .attacks_fgm import fgm from .artifacts_mlflow import upload_directory_as_tarball_artifact, upload_data_frame_artifact, download_all_artifacts +from .defenses_image_preprocessing import create_defended_dataset LOGGER: BoundLogger = structlog.stdlib.get_logger() @@ -208,7 +209,7 @@ def attack( 
     minimal: bool = False,
     norm: Union[int, float, str] = np.inf,
 ):
-    make_directories(adv_data_dir)
+    make_directories([adv_data_dir])
     distance_metrics_list = get_distance_metric_list(distance_metrics)
     fgm_dataset = fgm(
         data_flow=dataset,
@@ -236,8 +237,27 @@ def compute_metrics(
     log_metrics(metrics)

 @pyplugs.register
-def augment_data():
-    pass
+def augment_data(
+    dataset: Any,
+    def_data_dir: Union[str, Path],
+    image_size: Tuple[int, int, int],
+    distance_metrics: List[Dict[str, str]],
+    batch_size: int = 32,
+    def_type: str = "spatial_smoothing",
+    defense_kwargs: Optional[Dict[str, Any]] = None,
+):
+    make_directories([def_data_dir])
+    distance_metrics_list = get_distance_metric_list(distance_metrics)
+    defended_dataset = create_defended_dataset(
+        data_flow=dataset,
+        def_data_dir=def_data_dir,
+        image_size=image_size,
+        distance_metrics_list=distance_metrics_list,
+        batch_size=batch_size,
+        def_type=def_type,
+        defense_kwargs=defense_kwargs,
+    )
+    return defended_dataset

 @pyplugs.register
 def predict():
diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py b/examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py
deleted file mode 100644
index 57d002ce1..000000000
--- a/examples/task-plugins/dioptra_custom/vc/artifacts_exceptions.py
+++ /dev/null
@@ -1,23 +0,0 @@
-# This Software (Dioptra) is being made available as a public service by the
-# National Institute of Standards and Technology (NIST), an Agency of the United
-# States Department of Commerce. This software was developed in part by employees of
-# NIST and in part by NIST contractors. Copyright in portions of this software that
-# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
-# to Title 17 United States Code Section 105, works of NIST employees are not
-# subject to copyright protection in the United States. However, NIST may hold
-# international copyright in software created by its employees and domestic
-# copyright (or licensing rights) in portions of software that were assigned or
-# licensed to NIST. To the extent that NIST holds copyright in this software, it is
-# being made available under the Creative Commons Attribution 4.0 International
-# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
-# of the software developed or licensed by NIST.
-#
-# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
-# https://creativecommons.org/licenses/by/4.0/legalcode
-"""A task plugin module of exceptions for the artifacts plugins collection."""
-
-from dioptra.sdk.exceptions.base import BaseTaskPluginError
-
-
-class UnsupportedDataFrameFileFormatError(BaseTaskPluginError):
-    """The requested data frame file format is not supported."""
diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py b/examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py
deleted file mode 100644
index e2a77af4a..000000000
--- a/examples/task-plugins/dioptra_custom/vc/artifacts_mlflow.py
+++ /dev/null
@@ -1,241 +0,0 @@
-# This Software (Dioptra) is being made available as a public service by the
-# National Institute of Standards and Technology (NIST), an Agency of the United
-# States Department of Commerce. This software was developed in part by employees of
-# NIST and in part by NIST contractors. Copyright in portions of this software that
-# were developed by NIST contractors has been licensed or assigned to NIST.
Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for MLFlow artifacts management. - -This module contains a set of task plugins for managing artifacts generated during an -entry point run. -""" - -import tarfile -from pathlib import Path -from typing import Any, Callable, Dict, Optional, Union - -import mlflow -import os -import pandas as pd -import structlog -from mlflow.tracking import MlflowClient -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from dioptra.sdk.utilities.paths import set_path_ext - -from .artifacts_restapi import upload_artifact_to_restapi, get_artifacts_for_job -from .artifacts_exceptions import UnsupportedDataFrameFileFormatError - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -@pyplugs.register -def download_all_artifacts_for_job( - job_id: str, artifact_path: str, destination_path: Optional[str] = None -) -> str: - """Downloads an artifact file or directory from a previous MLFlow run. - - Args: - run_id: The unique identifier of a previous MLFlow run. - artifact_path: The relative source path to the desired artifact. - destination_path: The relative destination path where the artifacts will be - downloaded. If `None`, the artifacts will be downloaded to a new - uniquely-named directory on the local filesystem. The default is `None`. - - Returns: - A string pointing to the directory containing the downloaded artifacts. - - See Also: - - :py:meth:`mlflow.tracking.MlflowClient.download_artifacts` - """ - uris = get_artifacts_for_job(job_id) - for uri in uris: - if uri.endswith(artifact_path): - download_path: str = mlflow.artifacts.download_artifacts( - artifact_uri=uri, dst_path=destination_path - ) - LOGGER.info( - "Artifact downloaded from MLFlow run", - job_id=job_id, - artifact_path=artifact_path, - destination_path=download_path, - ) - - return download_path - - -@pyplugs.register -def upload_data_frame_artifact( - data_frame: pd.DataFrame, - file_name: str, - file_format: str, - file_format_kwargs: Optional[Dict[str, Any]] = None, - working_dir: Optional[Union[str, Path]] = None, -) -> None: - """Uploads a :py:class:`~pandas.DataFrame` as an artifact of the active MLFlow run. - - The `file_format` argument selects the :py:class:`~pandas.DataFrame` serializer, - which are all handled using the object's `DataFrame.to_{format}` methods. The string - passed to `file_format` must match one of the following, - - - `csv[.bz2|.gz|.xz]` - A comma-separated values plain text file with optional - compression. - - `feather` - A binary feather file. - - `json` - A plain text JSON file. - - `pickle` - A binary pickle file. - - Args: - data_frame: A :py:class:`~pandas.DataFrame` to be uploaded. - file_name: The filename to use for the serialized :py:class:`~pandas.DataFrame`. 
- file_format: The :py:class:`~pandas.DataFrame` file serialization format. - file_format_kwargs: A dictionary of additional keyword arguments to pass to the - serializer. If `None`, then no additional keyword arguments are passed. The - default is `None`. - working_dir: The location where the file should be saved. If `None`, then the - current working directory is used. The default is `None`. - - Notes: - The :py:mod:`pyarrow` package must be installed in order to serialize to the - feather format. - - See Also: - - :py:meth:`pandas.DataFrame.to_csv` - - :py:meth:`pandas.DataFrame.to_feather` - - :py:meth:`pandas.DataFrame.to_json` - - :py:meth:`pandas.DataFrame.to_pickle` - """ - - def to_format( - data_frame: pd.DataFrame, format: str, output_dir: Union[str, Path] - ) -> Dict[str, Any]: - filepath: Path = Path(output_dir) / Path(file_name).name - format_funcs = { - "csv": { - "func": data_frame.to_csv, - "filepath": set_path_ext(filepath=filepath, ext="csv"), - }, - "csv.bz2": { - "func": data_frame.to_csv, - "filepath": set_path_ext(filepath=filepath, ext="csv.bz2"), - }, - "csv.gz": { - "func": data_frame.to_csv, - "filepath": set_path_ext(filepath=filepath, ext="csv.gz"), - }, - "csv.xz": { - "func": data_frame.to_csv, - "filepath": set_path_ext(filepath=filepath, ext="csv.xz"), - }, - "feather": { - "func": data_frame.to_feather, - "filepath": set_path_ext(filepath=filepath, ext="feather"), - }, - "json": { - "func": data_frame.to_json, - "filepath": set_path_ext(filepath=filepath, ext="json"), - }, - "pickle": { - "func": data_frame.to_pickle, - "filepath": set_path_ext(filepath=filepath, ext="pkl"), - }, - } - - func: Optional[Dict[str, Any]] = format_funcs.get(format) - - if func is None: - raise UnsupportedDataFrameFileFormatError( - f"Serializing data frames to the {file_format} format is not supported" - ) - - return func - - if file_format_kwargs is None: - file_format_kwargs = {} - - if working_dir is None: - working_dir = Path.cwd() - - working_dir = Path(working_dir) - format_dict: Dict[str, Any] = to_format( - data_frame=data_frame, format=file_format, output_dir=working_dir - ) - - df_to_format_func: Callable[..., None] = format_dict["func"] - df_artifact_path: Path = format_dict["filepath"] - - df_to_format_func(df_artifact_path, **file_format_kwargs) - LOGGER.info( - "Data frame saved to file", - file_name=df_artifact_path.name, - file_format=file_format, - ) - - upload_file_as_artifact(artifact_path=df_artifact_path) - - -@pyplugs.register -def upload_directory_as_tarball_artifact( - source_dir: Union[str, Path], - tarball_filename: str, - tarball_write_mode: str = "w:gz", - working_dir: Optional[Union[str, Path]] = None, -) -> None: - """Archives a directory and uploads it as an artifact of the active MLFlow run. - - Args: - source_dir: The directory which should be uploaded. - tarball_filename: The filename to use for the archived directory tarball. - tarball_write_mode: The write mode for the tarball, see :py:func:`tarfile.open` - for the full list of compression options. The default is `"w:gz"` (gzip - compression). - working_dir: The location where the file should be saved. If `None`, then the - current working directory is used. The default is `None`. 
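A near-copy of this tarball helper lives on in the ``fgm_mnist_demo`` package, so
nothing is lost by the deletion; its core is the standard tarfile-plus-MLflow
idiom, sketched here under the assumption of an active MLflow run (the
``tar_and_log`` name is illustrative only)::

    import tarfile
    from pathlib import Path

    import mlflow

    def tar_and_log(source_dir: str, tarball_filename: str) -> None:
        """Archive ``source_dir`` and attach the tarball to the active run."""
        source = Path(source_dir)
        tarball = Path.cwd() / tarball_filename
        with tarfile.open(tarball, "w:gz") as f:
            f.add(source, arcname=source.name)  # the directory becomes one tar member
        mlflow.log_artifact(str(tarball))  # logged under the run's artifact root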
- - See Also: - - :py:func:`tarfile.open` - """ - if working_dir is None: - working_dir = Path.cwd() - - source_dir = Path(source_dir) - working_dir = Path(working_dir) - tarball_path = working_dir / tarball_filename - - with tarfile.open(tarball_path, tarball_write_mode) as f: - f.add(source_dir, arcname=source_dir.name) - - LOGGER.info( - "Directory added to tar archive", - directory=source_dir, - tarball_path=tarball_path, - ) - - upload_file_as_artifact(artifact_path=tarball_path) - - -@pyplugs.register -def upload_file_as_artifact(artifact_path: Union[str, Path]) -> None: - """Uploads a file as an artifact of the active MLFlow run. - - Args: - artifact_path: The location of the file to be uploaded. - - See Also: - - :py:func:`mlflow.log_artifact` - """ - artifact_path = Path(artifact_path) - mlflow.log_artifact(str(artifact_path)) - uri = mlflow.get_artifact_uri(str(artifact_path.name)) - upload_artifact_to_restapi(uri, os.environ['__JOB_ID']) - LOGGER.info("Artifact uploaded for current MLFlow run", filename=artifact_path.name) diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py b/examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py deleted file mode 100644 index 9847e45ed..000000000 --- a/examples/task-plugins/dioptra_custom/vc/artifacts_restapi.py +++ /dev/null @@ -1,151 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -import requests -import structlog - -from dioptra import pyplugs -from structlog.stdlib import BoundLogger -from posixpath import join as urljoin -from urllib.parse import urlparse, urlunparse - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - -@pyplugs.register -def get_uri_for_artifact(job_id, index=0): - session, url = get_logged_in_session() - job = get(session, url, 'jobs', str(job_id)) - uri = job['artifacts'][index]['artifactUri'] - return uri -def get_artifacts_for_job(job_id): - session, url = get_logged_in_session() - job = get(session, url, 'jobs', str(job_id)) - return [artifact['artifactUri'] for artifact in job['artifacts']] - -def get_logged_in_session(): - session = requests.Session() - url = "http://dioptra-deployment-restapi:5000/api/v1" - - login = post(session, url, {'username':'pluginuser', 'password':'pleasemakesuretoPLUGINthecomputer'}, 'auth', 'login') - LOGGER.info("login request sent", response=str(login)) - - return session, url - -def upload_model_to_restapi(name, source_uri, job_id): - version = 0 - model_id = 0 - - session, url = get_logged_in_session() - - models = get(session, url, f'models?search={name}&pageLength=500') - LOGGER.info("requesting models from RESTAPI", response=models) - - - for model in models['data']: - #check whether to create a new model - if model['name'] == name: - model_id = model['id'] - if model['latestVersion'] != None: - version = model['latestVersion']['versionNumber'] + 1 - if (version == 0 and model_id == 0): - LOGGER.info("creating new model on RESTAPI") - model = post(session, url, {"group": 1, "name": name, "description": f"{name} model"}, "models") - model_id = model['id'] - LOGGER.info("new model created", response=model) - - artifact = post(session, url, {"group": 1, "description": f"{name} model artifact", "job": str(job_id), "uri": source_uri}, 'artifacts') - LOGGER.info("artifact", response=artifact) - model_version = post(session, url, {"description": f"{name} model version", "artifact": artifact['id']}, 'models', str(model_id), 'versions') - LOGGER.info("model created", response=model_version) - -def upload_artifact_to_restapi(source_uri, job_id): - session, url = get_logged_in_session() - - artifact = post(session, url, {"group": 1, "description": f"artifact for job {job_id}", "job": str(job_id), "uri": source_uri}, 'artifacts') - LOGGER.info("artifact", response=artifact) - -def debug_request(url, method, data=None): - LOGGER.debug("Request made.", url=url, method=method, data=data) - - -def debug_response(json): - LOGGER.debug("Response received.", json=json) - - -def get(session, endpoint, *features): - debug_request(urljoin(endpoint, *features), "GET") - return make_request(session, "get", endpoint, None, *features) - - -def post(session, endpoint, data, *features): - debug_request(urljoin(endpoint, *features), "POST", data) - return make_request(session, "post", endpoint, data, *features) - - -def delete(session, endpoint, data, *features): - debug_request(urljoin(endpoint, *features), "DELETE", data) - return make_request(session, "delete", endpoint, data, *features) - - -def put(session, endpoint, data, *features): - debug_request(urljoin(endpoint, *features), "PUT", data) - return make_request(session, "put", endpoint, data, *features) - - -def make_request(session, method_name, endpoint, data, *features): - url = urljoin(endpoint, *features) - method = getattr(session, method_name) - try: - if data: 
- response = method(url, json=data) - else: - response = method(url) - if response.status_code != 200: - raise StatusCodeError() - json = response.json() - except (requests.ConnectionError, StatusCodeError, requests.JSONDecodeError) as e: - handle_error(session, url, method_name.upper(), data, response, e) - debug_response(json=json) - return json - - -def handle_error(session, url, method, data, response, error): - if type(error) is requests.ConnectionError: - restapi = os.environ["DIOPTRA_RESTAPI_URI"] - message = ( - f"Could not connect to the REST API. Is the server running at {restapi}?" - ) - LOGGER.error(message, url=url, method=method, data=data, response=response.text) - raise APIConnectionError(message) - if type(error) is StatusCodeError: - message = f"Error code {response.status_code} returned." - LOGGER.error(message, url=url, method=method, data=data, response=response.text) - raise StatusCodeError(message) - if type(error) is requests.JSONDecodeError: - message = "JSON response could not be decoded." - LOGGER.error(message, url=url, method=method, data=data, response=response.text) - raise JSONDecodeError(message) - -class APIConnectionError(Exception): - """Class for connection errors""" - - -class StatusCodeError(Exception): - """Class for status code errors""" - - -class JSONDecodeError(Exception): - """Class for JSON decode errors""" diff --git a/examples/task-plugins/dioptra_custom/vc/artifacts_utils.py b/examples/task-plugins/dioptra_custom/vc/artifacts_utils.py deleted file mode 100644 index 37404c7ae..000000000 --- a/examples/task-plugins/dioptra_custom/vc/artifacts_utils.py +++ /dev/null @@ -1,117 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module containing generic utilities for managing artifacts.""" - -from __future__ import annotations - -import os -import tarfile -import uuid -from pathlib import Path -from tarfile import TarFile -from typing import Any, List, Union - -import structlog -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -def is_within_directory(directory: Union[str, Path], target: Union[str, Path]) -> bool: - abs_directory = os.path.abspath(directory) - abs_target = os.path.abspath(target) - - prefix = os.path.commonprefix([abs_directory, abs_target]) - - return prefix == abs_directory - - -def safe_extract(tar: TarFile, path: Union[str, Path] = ".") -> None: - for member in tar.getmembers(): - member_path = os.path.join(path, member.name) - if not is_within_directory(path, member_path): - raise Exception("Attempted Path Traversal in Tar File") - - tar.extractall(path, members=None, numeric_owner=False) - - -@pyplugs.register -def extract_tarfile( - filepath: Union[str, Path], - tarball_read_mode: str = "r:gz", - output_dir: Any = None, -) -> None: - """Extracts a tarball archive into the current working directory. - - Args: - filepath: The location of the tarball archive file provided as a string or a - :py:class:`~pathlib.Path` object. - tarball_read_mode: The read mode for the tarball, see :py:func:`tarfile.open` - for the full list of compression options. The default is `"r:gz"` (gzip - compression). - - See Also: - - :py:func:`tarfile.open` - """ - output_dir = Path(output_dir) if output_dir is not None else Path.cwd() - - filepath = Path(filepath) - with tarfile.open(filepath, tarball_read_mode) as f: - safe_extract(f, path=output_dir) - - -@pyplugs.register -def make_directories(dirs: List[Union[str, Path]]) -> None: - """Creates directories if they do not exist. - - Args: - dirs: A list of directories provided as strings or :py:class:`~pathlib.Path` - objects. - """ - for d in dirs: - d = Path(d) - d.mkdir(parents=True, exist_ok=True) - LOGGER.info("Directory created", directory=d) - - -@pyplugs.register -def extract_tarfile_in_unique_subdir( - filepath: Union[str, Path], - tarball_read_mode: str = "r:gz", -) -> Path: - """Extracts a tarball archive into a unique subdirectory of the - current working directory. - - Args: - filepath: The location of the tarball archive file provided as a string or a - :py:class:`~pathlib.Path` object. - tarball_read_mode: The read mode for the tarball, see :py:func:`tarfile.open` - for the full list of compression options. The default is `"r:gz"` (gzip - compression). - - See Also: - - :py:func:`tarfile.open` - """ - output_dir = Path(uuid.uuid4().hex) - output_dir.mkdir(mode=0o700, parents=True, exist_ok=True) - - filepath = Path(filepath) - with tarfile.open(filepath, tarball_read_mode) as f: - safe_extract(f, path=output_dir) - return output_dir diff --git a/examples/task-plugins/dioptra_custom/vc/attacks_fgm.py b/examples/task-plugins/dioptra_custom/vc/attacks_fgm.py deleted file mode 100644 index b11282ea7..000000000 --- a/examples/task-plugins/dioptra_custom/vc/attacks_fgm.py +++ /dev/null @@ -1,305 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. 
This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for the Fast Gradient Method evasion attack. - -The Fast Gradient Method (FGM) [goodfellow2015]_ is an evasion attack that attempts to -fool a trained classifier by perturbing a test image using the gradient of the -classifier's neural network. This task plugin uses the Adversarial Robustness Toolbox's -[art2019]_ implementation of the |fgm_art|. - -References: - .. [art2019] M.-I. Nicolae et al., "Adversarial Robustness Toolbox v1.0.0," - Nov. 2019. [Online]. Available: - `arXiv:1807.01069v4 [cs.LG] `_. - - .. [goodfellow2015] I. Goodfellow, J. Shlens, and C. Szegedy. (May 2015). - Explaining and Harnessing Adversarial Examples, Presented at the Int. Conf. - on Learn. Represent. 2015, San Diego, California, United States. [Online]. - Available: `arXiv:1412.6572v3 [stat.ML] `_. - -.. |fgm_art| replace:: `Fast Gradient Method `__ -""" - -from __future__ import annotations - -from pathlib import Path -from typing import Callable, Dict, List, Optional, Tuple, Union - -import mlflow -import numpy as np -import pandas as pd -import scipy.stats -import structlog -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError -from dioptra.sdk.utilities.decorators import require_package - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - -try: - from art.attacks.evasion import FastGradientMethod - from art.estimators.classification import TensorFlowV2Classifier - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="art", - ) - - -try: - from tensorflow.keras.preprocessing.image import ImageDataGenerator, save_img - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="tensorflow", - ) - - -@pyplugs.register -@require_package("art", exc_type=ARTDependencyError) -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def create_adversarial_fgm_dataset( - data_dir: str, - adv_data_dir: Union[str, Path], - keras_classifier: TensorFlowV2Classifier, - image_size: Tuple[int, int, int], - distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, - rescale: float = 1.0 / 255, - batch_size: int = 32, - label_mode: str = "categorical", - eps: float = 0.3, - eps_step: float = 0.1, - minimal: bool = False, - norm: Union[int, float, str] = np.inf, -) -> pd.DataFrame: - """Generates an adversarial dataset using the Fast Gradient Method attack. 
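For orientation while reading the deletion, the ART calls at the heart of this
helper reduce to the following sketch (illustrative only; ``classifier`` is
assumed to be a trained ``TensorFlowV2Classifier`` and ``x`` a clean image
batch from the surrounding pipeline)::

    import numpy as np
    from art.attacks.evasion import FastGradientMethod

    attack = FastGradientMethod(
        estimator=classifier,  # assumed: trained TensorFlowV2Classifier
        eps=0.3,               # maximum perturbation, matching the YAML defaults
        eps_step=0.1,          # step size used when minimal=True
        minimal=False,
        norm=np.inf,
        batch_size=32,
    )
    x_adv = attack.generate(x=x)  # adversarial counterparts of the clean batch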
- - Each generated adversarial image is saved as an image file in the directory - specified by `adv_data_dir` and the distance metric functions passed to - `distance_metrics_list` are used to quantify the size of the perturbation applied to - each image. - - Args: - data_dir: The directory containing the clean test images. - adv_data_dir: The directory to use when saving the generated adversarial images. - keras_classifier: A trained :py:class:`~art.estimators.classification\\ - .TensorFlowV2Classifier`. - image_size: A tuple of integers `(height, width, channels)` used to preprocess - the images so that they all have the same dimensions and number of color - channels. `channels=3` means RGB color images and `channels=1` means - grayscale images. Images with different dimensions will be resized. If - `channels=1`, color images will be converted into grayscale. - distance_metrics_list: A list of distance metrics to compute after generating an - adversarial image. If `None`, then no distance metrics will be calculated. - The default is `None`. - rescale: The rescaling factor for the pixel vectors. If `None` or `0`, no - rescaling is applied, otherwise multiply the data by the value provided - (after applying all other transformations). The default is `1.0 / 255`. - batch_size: The size of the batch on which adversarial samples are generated. - The default is `32`. - label_mode: Determines how the label arrays for the dataset will be returned. - The available choices are: `"categorical"`, `"binary"`, `"sparse"`, - `"input"`, `None`. For information on the meaning of each choice, see - the documentation for |flow_from_directory|. The default is `"categorical"`. - eps: The attack step size. The default is `0.3`. - eps_step: The step size of the input variation for minimal perturbation - computation. The default is `0.1`. - minimal: If `True`, compute the minimal perturbation, and use `eps_step` for the - step size and `eps` for the maximum perturbation. The default is `False`. - norm: The norm of the adversarial perturbation. Can be `"inf"`, - :py:data:`numpy.inf`, `1`, or `2`. The default is :py:data:`numpy.inf`. - - Returns: - A :py:class:`~pandas.DataFrame` containing the full distribution of the - calculated distance metrics. - - See Also: - - |flow_from_directory| - - .. 
|flow_from_directory| replace:: :py:meth:`tf.keras.preprocessing.image\\ - .ImageDataGenerator.flow_from_directory` - """ - distance_metrics_list = distance_metrics_list or [] - color_mode: str = "rgb" if image_size[2] == 3 else "grayscale" - target_size: Tuple[int, int] = image_size[:2] - adv_data_dir = Path(adv_data_dir) - - attack = _init_fgm( - keras_classifier=keras_classifier, - batch_size=batch_size, - eps=eps, - eps_step=eps_step, - minimal=minimal, - norm=norm, - ) - - data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale) - - data_flow = data_generator.flow_from_directory( - directory=data_dir, - target_size=target_size, - color_mode=color_mode, - class_mode=label_mode, - batch_size=batch_size, - shuffle=False, - ) - num_images = data_flow.n - img_filenames = [Path(x) for x in data_flow.filenames] - - distance_metrics_: Dict[str, List[List[float]]] = {"image": [], "label": []} - for metric_name, _ in distance_metrics_list: - distance_metrics_[metric_name] = [] - - LOGGER.info( - "Generate adversarial images", - attack="fgm", - num_batches=num_images // batch_size, - ) - - for batch_num, (x, y) in enumerate(data_flow): - if batch_num >= num_images // batch_size: - break - - clean_filenames = img_filenames[ - batch_num * batch_size : (batch_num + 1) * batch_size # noqa: E203 - ] - - LOGGER.info( - "Generate adversarial image batch", - attack="fgm", - batch_num=batch_num, - ) - - y_int = np.argmax(y, axis=1) - adv_batch = attack.generate(x=x) - - _save_adv_batch(adv_batch, adv_data_dir, y_int, clean_filenames) - - _evaluate_distance_metrics( - clean_filenames=clean_filenames, - distance_metrics_=distance_metrics_, - clean_batch=x, - adv_batch=adv_batch, - distance_metrics_list=distance_metrics_list, - ) - - LOGGER.info("Adversarial image generation complete", attack="fgm") - _log_distance_metrics(distance_metrics_) - - return pd.DataFrame(distance_metrics_) - - -def _init_fgm( - keras_classifier: TensorFlowV2Classifier, batch_size: int, **kwargs -) -> FastGradientMethod: - """Initializes :py:class:`~art.attacks.evasion.FastGradientMethod`. - - Args: - keras_classifier: A trained :py:class:`~art.estimators.classification\\ - .TensorFlowV2Classifier`. - batch_size: The size of the batch on which adversarial samples are generated. - - Returns: - A :py:class:`~art.attacks.evasion.FastGradientMethod` object. - """ - attack: FastGradientMethod = FastGradientMethod( - estimator=keras_classifier, batch_size=batch_size, **kwargs - ) - return attack - - -def _save_adv_batch(adv_batch, adv_data_dir, y, clean_filenames) -> None: - """Saves a batch of adversarial images to disk. - - Args: - adv_batch: A generated batch of adversarial images. - adv_data_dir: The directory to use when saving the generated adversarial images. - y: An array containing the target labels of the original images. - clean_filenames: A list containing the filenames of the original images. - """ - for batch_image_num, adv_image in enumerate(adv_batch): - adv_image_path = ( - adv_data_dir - / f"{y[batch_image_num]}" - / f"adv_{clean_filenames[batch_image_num].name}" - ) - - if not adv_image_path.parent.exists(): - adv_image_path.parent.mkdir(parents=True) - - save_img(path=str(adv_image_path), x=adv_image) - - -def _evaluate_distance_metrics( - clean_filenames, distance_metrics_, clean_batch, adv_batch, distance_metrics_list -) -> None: - """Calculates distance metrics for a batch of clean/adversarial image pairs. - - Args: - clean_filenames: A list containing the filenames of the original images. 
- distance_metrics_: A dictionary used to record the values of the distance - metrics computed for the clean/adversarial image pairs. - clean_batch: The clean images used to generate the adversarial images in - `adv_batch`. - adv_batch: A generated batch of adversarial images. - distance_metrics_list: A list of distance metrics to compute after generating an - adversarial image. - """ - LOGGER.debug("evaluate image perturbations using distance metrics") - distance_metrics_["image"].extend([x.name for x in clean_filenames]) - distance_metrics_["label"].extend([x.parent for x in clean_filenames]) - for metric_name, metric in distance_metrics_list: - distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch)) - - -def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> None: - """Logs the distance metrics summary statistics to the MLFlow Tracking service. - - The following summary statistics are calculated and logged to the MLFlow Tracking - service for each of the distributions recorded in the `distance_metrics_` - dictionary: - - - mean - - median - - standard deviation - - interquartile range - - minimum - - maximum - - Args: - distance_metrics_: A dictionary used to record the values of the distance - metrics computed for the clean/adversarial image pairs. - """ - distance_metrics_ = distance_metrics_.copy() - del distance_metrics_["image"] - del distance_metrics_["label"] - for metric_name, metric_values_list in distance_metrics_.items(): - metric_values = np.array(metric_values_list) - mlflow.log_metric(key=f"{metric_name}_mean", value=metric_values.mean()) - mlflow.log_metric(key=f"{metric_name}_median", value=np.median(metric_values)) - mlflow.log_metric(key=f"{metric_name}_stdev", value=metric_values.std()) - mlflow.log_metric( - key=f"{metric_name}_iqr", value=scipy.stats.iqr(metric_values) - ) - mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) - mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) - LOGGER.info("logged distance-based metric", metric_name=metric_name) diff --git a/examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py b/examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py deleted file mode 100644 index 10ca767e5..000000000 --- a/examples/task-plugins/dioptra_custom/vc/backend_configs_tensorflow.py +++ /dev/null @@ -1,52 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
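A usage sketch for the plugin above; the classifier object, directory names, and metric pairing are illustrative assumptions (l_inf_norm is defined in the companion metrics_distance module):

    distance_df = create_adversarial_fgm_dataset(
        data_dir="testing",                   # hypothetical clean test images
        adv_data_dir="adv_testing",           # output directory for perturbed images
        keras_classifier=wrapped_classifier,  # an ART TensorFlowV2Classifier (assumed)
        image_size=(28, 28, 1),               # MNIST-style grayscale inputs
        distance_metrics_list=[("l_infinity_norm", l_inf_norm)],
        eps=0.3,
    )
    # One row per image: filename, label, and each requested distance metric;
    # summary statistics are logged to MLflow by _log_distance_metrics().
    print(distance_df.head())
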
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for initializing and configuring Tensorflow.""" - -from __future__ import annotations - -import structlog -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from dioptra.sdk.exceptions import TensorflowDependencyError -from dioptra.sdk.utilities.decorators import require_package - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -try: - import tensorflow as tf - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="tensorflow", - ) - - -@pyplugs.register -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def init_tensorflow(seed: int) -> None: - """Initializes Tensorflow to ensure reproducibility. - - This task plugin **must** be run before any other features from Tensorflow are used - to ensure reproducibility. - - Args: - seed: The seed to use for Tensorflow's random number generator. - """ - tf.random.set_seed(seed) diff --git a/examples/task-plugins/dioptra_custom/vc/builtin.py b/examples/task-plugins/dioptra_custom/vc/builtin.py deleted file mode 100644 index f477ecc58..000000000 --- a/examples/task-plugins/dioptra_custom/vc/builtin.py +++ /dev/null @@ -1,208 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
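Because tf.random.set_seed() only affects random operations created after it runs, call order matters; a short sketch of the behavior the plugin relies on:

    import tensorflow as tf

    tf.random.set_seed(20240918)      # what init_tensorflow(seed) does
    first = tf.random.uniform((1,))
    tf.random.set_seed(20240918)      # resetting the global seed replays the
    again = tf.random.uniform((1,))   # same op-level random sequence
    assert float(first) == float(again)
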
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -from __future__ import annotations - -from pathlib import Path -from typing import Callable, Dict, List, Optional, Tuple, Union - -import mlflow -import numpy as np -import pandas as pd -import scipy.stats -import structlog -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError -from dioptra.sdk.utilities.decorators import require_package - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - -try: - from art.attacks.evasion import FastGradientMethod - from art.estimators.classification import TensorFlowV2Classifier - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="art", - ) - - -try: - from tensorflow.keras.preprocessing.image import ImageDataGenerator, save_img - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="tensorflow", - ) - - -@pyplugs.register -@require_package("art", exc_type=ARTDependencyError) -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def create_adversarial_fgm_dataset( - data_dir: str, - adv_data_dir: Union[str, Path], - keras_classifier: TensorFlowV2Classifier, - image_size: Tuple[int, int, int], - distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, - rescale: float = 1.0 / 255, - batch_size: int = 32, - label_mode: str = "categorical", - eps: float = 0.3, - eps_step: float = 0.1, - minimal: float = 0, - norm: float = np.inf, - target_index: int = -1, - targeted: bool = False, -) -> pd.DataFrame: - distance_metrics_list = distance_metrics_list or [] - color_mode: str = "rgb" if image_size[2] == 3 else "grayscale" - target_size: Tuple[int, int] = image_size[:2] - adv_data_dir = Path(adv_data_dir) - - attack = _init_fgm( - keras_classifier=keras_classifier, - batch_size=batch_size, - eps=eps, - eps_step=eps_step, - minimal=minimal, - norm=norm, - targeted=targeted, - ) - - data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale) - - data_flow = data_generator.flow_from_directory( - directory=data_dir, - target_size=target_size, - color_mode=color_mode, - class_mode=label_mode, - batch_size=batch_size, - shuffle=False, - ) - n_classes = len(data_flow.class_indices) - num_images = data_flow.n - img_filenames = [Path(x) for x in data_flow.filenames] - class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get) - - distance_metrics_: Dict[str, List[List[float]]] = {"image": [], "label": []} - for metric_name, _ in distance_metrics_list: - distance_metrics_[metric_name] = [] - - LOGGER.info( - "Generate adversarial images", - attack="fgm", - num_batches=num_images // batch_size, - ) - - for batch_num, (x, y) in enumerate(data_flow): - if batch_num >= num_images // batch_size: - break - - clean_filenames = img_filenames[ - batch_num * batch_size : (batch_num + 1) * batch_size # noqa: E203 - ] - - LOGGER.info( - "Generate adversarial image batch", - attack="fgm", - batch_num=batch_num, - ) - - y_int = np.argmax(y, axis=1) - if target_index >= 0: - y_one_hot = np.zeros(n_classes) - y_one_hot[target_index] = 1.0 - y_target = np.tile(y_one_hot, (x.shape[0], 1)) - - adv_batch = attack.generate(x=x, y=y_target) - else: - adv_batch = attack.generate(x=x) - - _save_adv_batch( - adv_batch, 
adv_data_dir, y_int, clean_filenames, class_names_list - ) - - _evaluate_distance_metrics( - clean_filenames=clean_filenames, - distance_metrics_=distance_metrics_, - clean_batch=x, - adv_batch=adv_batch, - distance_metrics_list=distance_metrics_list, - ) - - LOGGER.info("Adversarial image generation complete", attack="fgm") - _log_distance_metrics(distance_metrics_) - - return pd.DataFrame(distance_metrics_) - - -def _init_fgm( - keras_classifier: TensorFlowV2Classifier, batch_size: int, **kwargs -) -> FastGradientMethod: - attack: FastGradientMethod = FastGradientMethod( - estimator=keras_classifier, batch_size=batch_size, **kwargs - ) - return attack - - -def _save_adv_batch( - adv_batch, adv_data_dir, y, clean_filenames, class_names_list -) -> None: - for batch_image_num, adv_image in enumerate(adv_batch): - out_label = class_names_list[y[batch_image_num]] - adv_image_path = ( - adv_data_dir - / f"{out_label}" - / f"adv_{clean_filenames[batch_image_num].name}" - ) - - if not adv_image_path.parent.exists(): - adv_image_path.parent.mkdir(parents=True) - - save_img(path=str(adv_image_path), x=adv_image) - - -def _evaluate_distance_metrics( - clean_filenames, distance_metrics_, clean_batch, adv_batch, distance_metrics_list -) -> None: - LOGGER.debug("evaluate image perturbations using distance metrics") - distance_metrics_["image"].extend([x.name for x in clean_filenames]) - distance_metrics_["label"].extend([x.parent for x in clean_filenames]) - for metric_name, metric in distance_metrics_list: - distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch)) - - -def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> None: - distance_metrics_ = distance_metrics_.copy() - del distance_metrics_["image"] - del distance_metrics_["label"] - for metric_name, metric_values_list in distance_metrics_.items(): - metric_values = np.array(metric_values_list) - mlflow.log_metric(key=f"{metric_name}_mean", value=metric_values.mean()) - mlflow.log_metric(key=f"{metric_name}_median", value=np.median(metric_values)) - mlflow.log_metric(key=f"{metric_name}_stdev", value=metric_values.std()) - mlflow.log_metric( - key=f"{metric_name}_iqr", value=scipy.stats.iqr(metric_values) - ) - mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) - mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) - LOGGER.info("logged distance-based metric", metric_name=metric_name) - diff --git a/examples/task-plugins/dioptra_custom/vc/data_tensorflow.py b/examples/task-plugins/dioptra_custom/vc/data_tensorflow.py deleted file mode 100644 index facabcaa0..000000000 --- a/examples/task-plugins/dioptra_custom/vc/data_tensorflow.py +++ /dev/null @@ -1,128 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. 
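The targeted branch above tiles a single one-hot row across the whole batch, so every image is pushed toward the same class; a worked sketch with hypothetical sizes:

    import numpy as np

    n_classes, batch = 10, 4
    target_index = 7
    y_one_hot = np.zeros(n_classes)
    y_one_hot[target_index] = 1.0
    y_target = np.tile(y_one_hot, (batch, 1))  # shape (4, 10)
    assert (np.argmax(y_target, axis=1) == target_index).all()
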
To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for preparing Tensorflow-specific dataset iterators. - -.. |flow_from_directory| replace:: :py:meth:`tensorflow.keras.preprocessing.image\\ - .ImageDataGenerator.flow_from_directory` -.. |directory_iterator| replace:: :py:class:`~tensorflow.keras.preprocessing.image\\ - .DirectoryIterator` -""" - -from __future__ import annotations - -from typing import Optional, Tuple - -import structlog -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from dioptra.sdk.exceptions import TensorflowDependencyError -from dioptra.sdk.utilities.decorators import require_package - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - -try: - from tensorflow.keras.preprocessing.image import ( - DirectoryIterator, - ImageDataGenerator, - ) - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="tensorflow", - ) - - -@pyplugs.register -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def create_image_dataset( - data_dir: str, - subset: Optional[str], - image_size: Tuple[int, int, int], - seed: int, - rescale: float = 1.0 / 255, - validation_split: Optional[float] = 0.2, - batch_size: int = 32, - label_mode: str = "categorical", -) -> DirectoryIterator: - """Yields an iterator for generating batches of real-time augmented image data. - - Args: - data_dir: The directory containing the image dataset. - subset: The subset of data (`"training"` or `"validation"`) to use if - `validation_split` is not `None`. If `None`, then `validation_split` must - also be `None`. - image_size: A tuple of integers `(height, width, channels)` used to preprocess - the images so that they all have the same dimensions and number of color - channels. `channels=3` means RGB color images and `channels=1` means - grayscale images. Images with different dimensions will be resized. If - `channels=1`, color images will be converted into grayscale. - seed: Sets the random seed used for shuffling and transformations. - rescale: The rescaling factor for the pixel vectors. If `None` or `0`, no - rescaling is applied, otherwise multiply the data by the value provided - (after applying all other transformations). The default is `1.0 / 255`. - validation_split: The fraction of the data to set aside for validation. If not - `None`, the value given here must be between `0` and `1`. If `None`, then - there is no validation set. The default is `0.2`. - batch_size: The size of the batch on which adversarial samples are generated. - The default is `32`. - label_mode: Determines how the label arrays for the dataset will be returned. - The available choices are: `"categorical"`, `"binary"`, `"sparse"`, - `"input"`, `None`. For information on the meaning of each choice, see - the documentation for |flow_from_directory|. The default is `"categorical"`. - - Returns: - A :py:class:`~tensorflow.keras.preprocessing.image.DirectoryIterator` object. 
- - See Also: - - |flow_from_directory| - - :py:class:`~tensorflow.keras.preprocessing.image.DirectoryIterator` - """ - color_mode: str = ( - "rgb" if image_size[2] == 3 else "rgba" if image_size[2] == 4 else "grayscale" - ) - target_size: Tuple[int, int] = image_size[:2] - - data_generator: ImageDataGenerator = ImageDataGenerator( - rescale=rescale, - validation_split=validation_split, - ) - - return data_generator.flow_from_directory( - directory=data_dir, - target_size=target_size, - color_mode=color_mode, - class_mode=label_mode, - batch_size=batch_size, - seed=seed, - subset=subset, - ) - - -@pyplugs.register -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def get_n_classes_from_directory_iterator(ds: DirectoryIterator) -> int: - """Returns the number of unique labels found by the |directory_iterator|. - - Args: - ds: A |directory_iterator| object. - - Returns: - The number of unique labels in the dataset. - """ - return len(ds.class_indices) diff --git a/examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py b/examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py deleted file mode 100644 index f5ef72548..000000000 --- a/examples/task-plugins/dioptra_custom/vc/estimators_keras_classifiers.py +++ /dev/null @@ -1,231 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
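A sketch of building matched training/validation iterators with the plugin above; the dataset root is hypothetical, and reusing the same seed and validation_split keeps the two subsets disjoint and reproducible:

    training = create_image_dataset(
        data_dir="training",        # hypothetical dataset root
        subset="training",
        image_size=(28, 28, 1),
        seed=31337,
        validation_split=0.2,
    )
    validation = create_image_dataset(
        data_dir="training",
        subset="validation",
        image_size=(28, 28, 1),
        seed=31337,                 # same seed as the training subset
        validation_split=0.2,
    )
    n_classes = get_n_classes_from_directory_iterator(training)
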
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Neural network image classifiers implemented in Tensorflow/Keras.""" - -from __future__ import annotations - -from types import FunctionType -from typing import Callable, Dict, List, Tuple, Union - -import structlog -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from dioptra.sdk.exceptions import TensorflowDependencyError -from dioptra.sdk.utilities.decorators import require_package - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - -try: - from tensorflow.keras.layers import ( - BatchNormalization, - Conv2D, - Dense, - Dropout, - Flatten, - MaxPooling2D, - ) - from tensorflow.keras.metrics import Metric - from tensorflow.keras.models import Sequential - from tensorflow.keras.optimizers import Optimizer - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="tensorflow", - ) - - -@pyplugs.register -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def init_classifier( - model_architecture: str, - optimizer: Optimizer, - metrics: List[Union[Metric, FunctionType]], - input_shape: Tuple[int, int, int], - n_classes: int, - loss: str = "categorical_crossentropy", -) -> Sequential: - """Initializes an untrained neural network image classifier for Tensorflow/Keras. - - The `model_architecture` argument is used to select a neural network architecture - from the architecture registry. The string passed to `model_architecture` must match - one of the following, - - - `"shallow_net"` - A shallow neural network architecture. - - `"le_net"` - The LeNet-5 convolutional neural network architecture. - - `"alex_net"` - The AlexNet convolutional neural network architecture. - - Args: - model_architecture: The neural network architecture to use. - optimizer: A Keras :py:class:`~tf.keras.optimizers.Optimizer` providing an - algorithm to use to train the estimator, such as - :py:class:`~tf.keras.optimizers.SGD` and - :py:class:`~tf.keras.optimizers.Adam`. - metrics: A list of metrics to be evaluated by the model during training and - testing. - input_shape: A shape tuple of integers, not including the batch size, specifying - the dimensions of the image data. The shape tuple for all classifiers in the - architecture registry follows the convention `(height, width, channels)`. - n_classes: The number of target labels in the dataset. - loss: A string specifying the loss function to be minimized during training. The - string must match the name of one of the loss functions in the - :py:mod:`tf.keras.losses` module. The default is - `"categorical_crossentropy"`. - - Returns: - A compiled :py:class:`~tf.keras.Sequential` object. - - See Also: - - :py:mod:`tf.keras.losses` - - :py:mod:`tf.keras.optimizers` - - :py:class:`tf.keras.Sequential` - """ - classifier: Sequential = KERAS_CLASSIFIERS_REGISTRY[model_architecture]( - input_shape, - n_classes, - ) - classifier.compile(loss=loss, optimizer=optimizer, metrics=metrics) - - return classifier - - -def shallow_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: - """Builds an untrained shallow neural network architecture for Tensorflow/Keras. - - Args: - input_shape: A shape tuple of integers, not including the batch size, specifying - the dimensions of the image data. The shape tuple for all classifiers in the - architecture registry follows the convention `(height, width, channels)`. 
- n_classes: The number of target labels in the dataset. - - Returns: - A :py:class:`~tf.keras.Sequential` object. - - See Also: - - :py:class:`tf.keras.Sequential` - """ - model = Sequential() - - # Flatten inputs - model.add(Flatten(input_shape=input_shape)) - - # single hidden layer: - model.add(Dense(32, activation="sigmoid")) - - # output layer: - model.add(Dense(n_classes, activation="softmax")) - - return model - - -def le_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: - """Builds an untrained LeNet-5 neural network architecture for Tensorflow/Keras. - - Args: - input_shape: A shape tuple of integers, not including the batch size, specifying - the dimensions of the image data. The shape tuple for all classifiers in the - architecture registry follows the convention `(height, width, channels)`. - n_classes: The number of target labels in the dataset. - - Returns: - A :py:class:`~tf.keras.Sequential` object. - - See Also: - - :py:class:`tf.keras.Sequential` - """ - model = Sequential() - - # first convolutional layer: - model.add( - Conv2D(32, kernel_size=(3, 3), activation="relu", input_shape=input_shape) - ) - - # second conv layer, with pooling and dropout: - model.add(Conv2D(64, kernel_size=(3, 3), activation="relu")) - model.add(MaxPooling2D(pool_size=(2, 2))) - model.add(Dropout(0.25)) - model.add(Flatten()) - - # dense hidden layer, with dropout: - model.add(Dense(128, activation="relu")) - model.add(Dropout(0.5)) - - # output layer: - model.add(Dense(n_classes, activation="softmax")) - - return model - - -def alex_net(input_shape: Tuple[int, int, int], n_classes: int) -> Sequential: - """Builds an untrained AlexNet neural network architecture for Tensorflow/Keras. - - Args: - input_shape: A shape tuple of integers, not including the batch size, specifying - the dimensions of the image data. The shape tuple for all classifiers in the - architecture registry follows the convention `(height, width, channels)`. - n_classes: The number of target labels in the dataset. - - Returns: - A :py:class:`~tf.keras.Sequential` object. 
- - See Also: - - :py:class:`tf.keras.Sequential` - """ - model = Sequential() - - # first conv-pool block: - model.add( - Conv2D( - 96, - kernel_size=(11, 11), - strides=(4, 4), - activation="relu", - input_shape=input_shape, - ) - ) - model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) - model.add(BatchNormalization()) - - # second conv-pool block: - model.add(Conv2D(256, kernel_size=(5, 5), activation="relu")) - model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) - model.add(BatchNormalization()) - - # third conv-pool block: - model.add(Conv2D(256, kernel_size=(3, 3), activation="relu")) - model.add(Conv2D(384, kernel_size=(3, 3), activation="relu")) - model.add(Conv2D(384, kernel_size=(3, 3), activation="relu")) - model.add(MaxPooling2D(pool_size=(3, 3), strides=(2, 2))) - model.add(BatchNormalization()) - - # dense layers: - model.add(Flatten()) - model.add(Dense(4096, activation="tanh")) - model.add(Dropout(0.5)) - model.add(Dense(4096, activation="tanh")) - model.add(Dropout(0.5)) - - # output layer: - model.add(Dense(n_classes, activation="softmax")) - - return model - - -KERAS_CLASSIFIERS_REGISTRY: Dict[ - str, Callable[[Tuple[int, int, int], int], Sequential] -] = dict(shallow_net=shallow_net, le_net=le_net, alex_net=alex_net) diff --git a/examples/task-plugins/dioptra_custom/vc/estimators_methods.py b/examples/task-plugins/dioptra_custom/vc/estimators_methods.py deleted file mode 100644 index 28396c530..000000000 --- a/examples/task-plugins/dioptra_custom/vc/estimators_methods.py +++ /dev/null @@ -1,122 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -from __future__ import annotations - -import datetime -from typing import Any, Dict, Optional - -import mlflow -import structlog -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from dioptra.sdk.generics import estimator_predict, fit_estimator - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -@pyplugs.register -def fit( - estimator: Any, - x: Any = None, - y: Any = None, - fit_kwargs: Optional[Dict[str, Any]] = None, -) -> Any: - """Fits the estimator to the given data. - - This task plugin wraps :py:func:`~dioptra.sdk.generics.fit_estimator`, which is a - generic function that uses multiple argument dispatch to handle the estimator - fitting method for different machine learning libraries. 
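Tying the architecture registry to the fit() wrapper in this module, a hedged sketch (the optimizer choice, epoch count, and iterator are assumptions, and it assumes the Keras dispatch accepts a DirectoryIterator for x; plain metric strings are accepted by Keras compile()):

    from tensorflow.keras.optimizers import Adam

    model = init_classifier(
        model_architecture="le_net",
        optimizer=Adam(learning_rate=0.001),
        metrics=["accuracy"],
        input_shape=(28, 28, 1),
        n_classes=10,
    )
    history = fit(
        estimator=model,
        x=training,                 # e.g. a DirectoryIterator of training images
        fit_kwargs={"epochs": 30, "verbose": 2},
    )
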
The modules attached to the - advertised plugin entry point `dioptra.generics.fit_estimator` are used to build the - function dispatch registry at runtime. For more information on the supported fitting - methods and `fit_kwargs` arguments, please refer to the documentation of the - registered dispatch functions. - - Args: - estimator: The model to be trained. - x: The input data to be used for training. - y: The target data to be used for training. - fit_kwargs: An optional dictionary of keyword arguments to pass to the - dispatched function. - - Returns: - The object returned by the estimator's fitting function. For further details on - the type of object this method can return, see the documentation for the - registered dispatch functions. - - See Also: - - :py:func:`dioptra.sdk.generics.fit_estimator` - """ - fit_kwargs = fit_kwargs or {} - time_start: datetime.datetime = datetime.datetime.now() - - LOGGER.info( - "Begin estimator fit", - timestamp=time_start.isoformat(), - ) - - estimator_fit_result: Any = fit_estimator(estimator, x, y, **fit_kwargs) - - time_end: datetime.datetime = datetime.datetime.now() - - total_seconds: float = (time_end - time_start).total_seconds() - total_minutes: float = total_seconds / 60 - - mlflow.log_metric("training_time_in_minutes", total_minutes) - LOGGER.info( - "Estimator fit complete", - timestamp=time_end.isoformat(), - total_minutes=total_minutes, - ) - - return estimator_fit_result - - -@pyplugs.register -def predict( - estimator: Any, - x: Any = None, - predict_kwargs: Optional[Dict[str, Any]] = None, -) -> Any: - """Uses the estimator to make predictions on the given input data. - - This task plugin wraps :py:func:`~dioptra.sdk.generics.estimator_predict`, which is - a generic function that uses multiple argument dispatch to handle estimator - prediction methods for different machine learning libraries. The modules attached to - the advertised plugin entry point `dioptra.generics.estimator_predict` are used to - build the function dispatch registry at runtime. For more information on the - supported prediction methods and `predict_kwargs` arguments, refer to the - documentation of the registered dispatch functions. - - Args: - estimator: A trained model to be used to generate predictions. - x: The input data for which to generate predictions. - predict_kwargs: An optional dictionary of keyword arguments to pass to the - dispatched function. - - Returns: - The object returned by the estimator's predict function. For further details on - the type of object this method can return, see the documentation for the - registered dispatch functions. - - See Also: - - :py:func:`dioptra.sdk.generics.estimator_predict` - """ - predict_kwargs = predict_kwargs or {} - prediction: Any = estimator_predict(estimator, x, **predict_kwargs) - - return prediction diff --git a/examples/task-plugins/dioptra_custom/vc/import_keras.py b/examples/task-plugins/dioptra_custom/vc/import_keras.py deleted file mode 100644 index b5d03b51c..000000000 --- a/examples/task-plugins/dioptra_custom/vc/import_keras.py +++ /dev/null @@ -1,65 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. 
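And the matching inference call, carrying over the trained model from the training sketch; the test iterator is assumed:

    preds = predict(
        estimator=model,
        x=testing,                   # a DirectoryIterator or ndarray batch (assumed)
        predict_kwargs={"verbose": 0},
    )
    labels = preds.argmax(axis=1)    # class indices for categorical outputs
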
Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -from __future__ import annotations - -import importlib -from types import FunctionType, ModuleType -from typing import Union - -import structlog -from structlog.stdlib import BoundLogger - -from dioptra.sdk.exceptions import TensorflowDependencyError -from dioptra.sdk.utilities.decorators import require_package - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - -try: - from tensorflow.keras.callbacks import Callback - from tensorflow.keras.metrics import Metric - from tensorflow.keras.optimizers import Optimizer - -except ImportError: # pragma: nocover - LOGGER.warn( - "Unable to import one or more optional packages, functionality may be reduced", - package="tensorflow", - ) - -KERAS_CALLBACKS: str = "tensorflow.keras.callbacks" -KERAS_METRICS: str = "tensorflow.keras.metrics" -KERAS_OPTIMIZERS: str = "tensorflow.keras.optimizers" - - -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def get_callback(callback_name: str) -> Callback: - keras_callbacks: ModuleType = importlib.import_module(KERAS_CALLBACKS) - callback: Callback = getattr(keras_callbacks, callback_name) - return callback - - -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def get_metric(metric_name: str) -> Union[Metric, FunctionType]: - keras_metrics: ModuleType = importlib.import_module(KERAS_METRICS) - metric: Metric = getattr(keras_metrics, metric_name) - return metric - - -@require_package("tensorflow", exc_type=TensorflowDependencyError) -def get_optimizer(optimizer_name: str) -> Optimizer: - keras_optimizers: ModuleType = importlib.import_module(KERAS_OPTIMIZERS) - optimizer: Optimizer = getattr(keras_optimizers, optimizer_name) - return optimizer diff --git a/examples/task-plugins/dioptra_custom/vc/metrics_distance.py b/examples/task-plugins/dioptra_custom/vc/metrics_distance.py deleted file mode 100644 index 034f5a02c..000000000 --- a/examples/task-plugins/dioptra_custom/vc/metrics_distance.py +++ /dev/null @@ -1,307 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. 
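These helpers resolve Keras symbols by name so that experiment YAML files can reference optimizers, metrics, and callbacks as strings; a usage sketch (each getter returns the class, which the caller instantiates; the names are assumed to exist in tf.keras):

    optimizer = get_optimizer("Adam")(learning_rate=0.001)
    metric = get_metric("CategoricalAccuracy")()
    early_stop = get_callback("EarlyStopping")(patience=5)
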
To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for getting functions from a distance metric registry. - -.. |Linf| replace:: L\\ :sub:`∞` -.. |L1| replace:: L\\ :sub:`1` -.. |L2| replace:: L\\ :sub:`2` -""" - -from __future__ import annotations - -from typing import Any, Callable, Dict, List, Optional, Tuple - -import numpy as np -import structlog -from scipy.stats import wasserstein_distance -from sklearn.metrics.pairwise import paired_distances -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs - -from .metrics_exceptions import UnknownDistanceMetricError - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -@pyplugs.register -def get_distance_metric_list( - request: List[Dict[str, str]] -) -> List[Tuple[str, Callable[..., np.ndarray]]]: - """Gets multiple distance metric functions from the registry. - - The following metrics are available in the registry, - - - `l_inf_norm` - - `l_1_norm` - - `l_2_norm` - - `paired_cosine_similarities` - - `paired_euclidean_distances` - - `paired_manhattan_distances` - - `paired_wasserstein_distances` - - Args: - request: A list of dictionaries with the keys `name` and `func`. The `func` key - is used to lookup the metric function in the registry and must match one of - the metric names listed above. The `name` key is human-readable label for - the metric function. - - Returns: - A list of tuples with two elements. The first element of each tuple is the label - from the `name` key of `request`, and the second element is the callable metric - function. - """ - distance_metrics_list: List[Tuple[str, Callable[..., np.ndarray]]] = [] - - for metric in request: - metric_callable: Optional[Callable[..., np.ndarray]] = ( - DISTANCE_METRICS_REGISTRY.get(metric["func"]) - ) - - if metric_callable is not None: - distance_metrics_list.append((metric["name"], metric_callable)) - - else: - LOGGER.warn( - "Distance metric not in registry, skipping...", - name=metric["name"], - func=metric["func"], - ) - - return distance_metrics_list - - -@pyplugs.register -def get_distance_metric(func: str) -> Callable[..., np.ndarray]: - """Gets a distance metric function from the registry. - - The following metrics are available in the registry, - - - `l_inf_norm` - - `l_1_norm` - - `l_2_norm` - - `paired_cosine_similarities` - - `paired_euclidean_distances` - - `paired_manhattan_distances` - - `paired_wasserstein_distances` - - Args: - func: A string that identifies the distance metric to return from the registry. - The string must match one of the names of the metrics in the registry. - - Returns: - A callable distance metric function. - """ - metric_callable: Optional[Callable[..., np.ndarray]] = ( - DISTANCE_METRICS_REGISTRY.get(func) - ) - - if metric_callable is None: - LOGGER.error( - "Distance metric not in registry", - func=func, - ) - raise UnknownDistanceMetricError( - f"Could not find any distance metric named {func!r} in the metrics " - "plugin collection. Check spelling and try again." - ) - - return metric_callable - - -def l_inf_norm(y_true, y_pred) -> np.ndarray: - """Calculates the |Linf| norm between a batch of two matrices. 
- - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of |Linf| norms. - """ - metric: np.ndarray = _matrix_difference_l_norm( - y_true=y_true, y_pred=y_pred, order=np.inf - ) - return metric - - -def l_1_norm(y_true, y_pred) -> np.ndarray: - """Calculates the |L1| norm between a batch of two matrices. - - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of |L1| norms. - """ - metric: np.ndarray = _matrix_difference_l_norm( - y_true=y_true, y_pred=y_pred, order=1 - ) - return metric - - -def l_2_norm(y_true, y_pred) -> np.ndarray: - """Calculates the |L2| norm between a batch of two matrices. - - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of |L2| norms. - """ - metric: np.ndarray = _matrix_difference_l_norm( - y_true=y_true, y_pred=y_pred, order=2 - ) - return metric - - -def paired_cosine_similarities(y_true, y_pred) -> np.ndarray: - """Calculates the cosine similarity between a batch of two matrices. - - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of cosine similarities. - """ - y_true_normalized: np.ndarray = _normalize_batch(_flatten_batch(y_true), order=2) - y_pred_normalized: np.ndarray = _normalize_batch(_flatten_batch(y_pred), order=2) - metric: np.ndarray = np.sum(y_true_normalized * y_pred_normalized, axis=1) - return metric - - -def paired_euclidean_distances(y_true, y_pred) -> np.ndarray: - """Calculates the Euclidean distance between a batch of two matrices. - - The Euclidean distance is equivalent to the |L2| norm. - - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of euclidean distances. - """ - metric: np.ndarray = l_2_norm(y_true=y_true, y_pred=y_pred) - return metric - - -def paired_manhattan_distances(y_true, y_pred) -> np.ndarray: - """Calculates the Manhattan distance between a batch of two matrices. - - The Manhattan distance is equivalent to the |L1| norm. - - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of Manhattan distances. - """ - metric: np.ndarray = l_1_norm(y_true=y_true, y_pred=y_pred) - return metric - - -def paired_wasserstein_distances(y_true, y_pred, **kwargs) -> np.ndarray: - """Calculates the Wasserstein distance between a batch of two matrices. - - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of Wasserstein distances. 
- - See Also: - - :py:func:`scipy.stats.wasserstein_distance` - """ - - def wrapped_metric(X, Y): - return wasserstein_distance(u_values=X, v_values=Y, **kwargs) - - metric: np.ndarray = paired_distances( - X=_flatten_batch(y_true), Y=_flatten_batch(y_pred), metric=wrapped_metric - ) - return metric - - -def _flatten_batch(X: np.ndarray) -> np.ndarray: - """Flattens each of the matrices in a batch into a one-dimensional array. - - Args: - X: A batch of matrices. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of one-dimensional arrays. - """ - num_samples: int = X.shape[0] - num_matrix_elements: int = int(np.prod(X.shape[1:])) - return X.reshape((num_samples, num_matrix_elements)) - - -def _matrix_difference_l_norm(y_true, y_pred, order) -> np.ndarray: - """Calculates a batch of norms of the difference between two matrices. - - Args: - y_true: A batch of matrices containing the original or target values. - y_pred: A batch of matrices containing the perturbed or predicted values. - order: The order of the norm, see :py:func:`numpy.linalg.norm` for the full list - of norms that can be calculated. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of norms. - - See Also: - - :py:func:`numpy.linalg.norm` - """ - y_diff: np.ndarray = _flatten_batch(y_true - y_pred) - y_diff_l_norm: np.ndarray = np.linalg.norm(y_diff, axis=1, ord=order) - return y_diff_l_norm - - -def _normalize_batch(X: np.ndarray, order: int) -> np.ndarray: - """Normalizes a batch of matrices by their norms. - - Args: - X: A batch of matrices to be normalized. - order: The order of the norm used for normalization, see - :py:func:`numpy.linalg.norm` for the full list of available norms. - - Returns: - A :py:class:`numpy.ndarray` containing a batch of normalized matrices. - - See Also: - - :py:func:`numpy.linalg.norm` - """ - X_l_norm: np.ndarray = np.linalg.norm(X, axis=1, ord=order) - num_samples: int = X_l_norm.shape[0] - normalized_batch: np.ndarray = X / X_l_norm.reshape((num_samples, 1)) - return normalized_batch - - -DISTANCE_METRICS_REGISTRY: Dict[str, Callable[..., Any]] = dict( - l_inf_norm=l_inf_norm, - l_1_norm=l_1_norm, - l_2_norm=l_2_norm, - paired_cosine_similarities=paired_cosine_similarities, - paired_euclidean_distances=paired_euclidean_distances, - paired_manhattan_distances=paired_manhattan_distances, - paired_wasserstein_distances=paired_wasserstein_distances, -) diff --git a/examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py b/examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py deleted file mode 100644 index fc88cc25c..000000000 --- a/examples/task-plugins/dioptra_custom/vc/metrics_exceptions.py +++ /dev/null @@ -1,27 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. 
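For reference, a sketch of the request format that get_distance_metric_list() expects, followed by a direct evaluation on a synthetic batch (the uniform offset is a crude stand-in for FGM output):

    import numpy as np

    request = [
        {"name": "l_infinity_norm", "func": "l_inf_norm"},
        {"name": "cosine_similarity", "func": "paired_cosine_similarities"},
    ]
    metrics = get_distance_metric_list(request)

    clean = np.random.default_rng(0).random((8, 28, 28, 1))
    adv = clean + 0.3                       # uniform perturbation stand-in
    for label, fn in metrics:
        print(label, fn(clean, adv)[:3])    # one value per image pair
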
To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module of exceptions for the metrics plugins collection.""" - -from dioptra.sdk.exceptions.base import BaseTaskPluginError - - -class UnknownDistanceMetricError(BaseTaskPluginError): - """The requested distance metric could not be located.""" - - -class UnknownPerformanceMetricError(BaseTaskPluginError): - """The requested performance metric could not be located.""" diff --git a/examples/task-plugins/dioptra_custom/vc/mlflow.py b/examples/task-plugins/dioptra_custom/vc/mlflow.py deleted file mode 100644 index 8546dff8c..000000000 --- a/examples/task-plugins/dioptra_custom/vc/mlflow.py +++ /dev/null @@ -1,103 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for using the MLFlow model registry.""" - -from __future__ import annotations - -import os - -from pathlib import Path -from typing import Optional - -import mlflow -import structlog -from mlflow.entities.model_registry import ModelVersion -from mlflow.tracking import MlflowClient -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs -from .artifacts_restapi import upload_model_to_restapi - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -@pyplugs.register -def add_model_to_registry(name: str, model_dir: str) -> Optional[ModelVersion]: - """Registers a trained model logged during the current run to the MLFlow registry. - - Args: - name: The registration name to use for the model. - model_dir: The relative artifact directory where MLFlow logged the model trained - during the current run. - - Returns: - A :py:class:`~mlflow.entities.model_registry.ModelVersion` object created by the - backend. 
- """ - job_id = os.environ['__JOB_ID'] - if not name.strip(): - return None - - active_run = mlflow.active_run() - - run_id: str = active_run.info.run_id - artifact_uri: str = active_run.info.artifact_uri - source: str = f"{artifact_uri}/{model_dir}" - - registered_models = [x.name for x in MlflowClient().search_registered_models()] - - if name not in registered_models: - LOGGER.info("create registered model", name=name) - MlflowClient().create_registered_model(name=name) - - LOGGER.info("create model version", name=name, source=source, run_id=run_id) - model_version: ModelVersion = MlflowClient().create_model_version( - name=name, source=source, run_id=run_id - ) - upload_model_to_restapi(name, source, job_id) - - return model_version - - -@pyplugs.register -def get_experiment_name() -> str: - """Gets the name of the experiment for the current run. - - Args: - active_run: The :py:class:`mlflow.ActiveRun` object managing the current run's - state. - - Returns: - The name of the experiment. - """ - active_run = mlflow.active_run() - - experiment_name: str = ( - MlflowClient().get_experiment(active_run.info.experiment_id).name - ) - LOGGER.info( - "Obtained experiment name of active run", experiment_name=experiment_name - ) - - return experiment_name - - -@pyplugs.register -def prepend_cwd(path: str) -> Path: - ret = Path.cwd() / path - return ret - diff --git a/examples/task-plugins/dioptra_custom/vc/random_rng.py b/examples/task-plugins/dioptra_custom/vc/random_rng.py deleted file mode 100644 index d10b2bd60..000000000 --- a/examples/task-plugins/dioptra_custom/vc/random_rng.py +++ /dev/null @@ -1,56 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for managing random number generators.""" - -from __future__ import annotations - -from typing import Tuple - -import numpy as np -import structlog -from numpy.random._generator import Generator as RNGenerator -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -@pyplugs.register -@pyplugs.task_nout(2) -def init_rng(seed: int = -1) -> Tuple[int, RNGenerator]: - """Constructs a new random number generator. - - Args: - seed: A seed to initialize the random number generator. If the value is less - than zero, then the seed is generated by pulling fresh, unpredictable - entropy from the OS. The default is `-1`. 
- - Returns: - A tuple containing the seed and the initialized random number generator. If a - `seed < 0` was passed as an argument, then the seed generated by the OS will be - returned. - - See Also: - - :py:func:`numpy.random.default_rng` - """ - rng = np.random.default_rng(seed if seed >= 0 else None) - - if seed < 0: - seed = rng.bit_generator._seed_seq.entropy # type: ignore[attr-defined] - - return int(seed), rng diff --git a/examples/task-plugins/dioptra_custom/vc/random_sample.py b/examples/task-plugins/dioptra_custom/vc/random_sample.py deleted file mode 100644 index 33c13d5d5..000000000 --- a/examples/task-plugins/dioptra_custom/vc/random_sample.py +++ /dev/null @@ -1,89 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for drawing random samples.""" - -from __future__ import annotations - -from typing import Optional, Tuple, Union - -import numpy as np -import structlog -from numpy.random._generator import Generator as RNGenerator -from structlog.stdlib import BoundLogger - -from dioptra import pyplugs - -LOGGER: BoundLogger = structlog.stdlib.get_logger() - - -@pyplugs.register -def draw_random_integer(rng: RNGenerator, low: int = 0, high: int = 2**31 - 1) -> int: - """Returns a random integer from `low` (inclusive) to `high` (exclusive). - - The integer is sampled from a uniform distribution. - - Args: - rng: A random number generator returned by :py:func:`~.rng.init_rng`. - low: Lowest (signed) integers to be drawn from the distribution (unless - `high=None`, in which case this parameter is `0` and this value is used for - `high`). - high: If not `None`, one above the largest (signed) integer to be drawn from the - distribution (see above for behavior if `high=None`) - - Returns: - A random integer. - - See Also: - - :py:meth:`numpy.random.Generator.integers` - """ - result: int = int(rng.integers(low=low, high=high)) - - return result - - -@pyplugs.register -def draw_random_integers( - rng: RNGenerator, - low: int = 0, - high: int = 2**31 - 1, - size: Optional[Union[int, Tuple[int, ...]]] = None, -) -> np.ndarray: - """Returns random integers from `low` (inclusive) to `high` (exclusive). - - The integers are sampled from a uniform distribution. - - Args: - rng: A random number generator returned by :py:func:`~.rng.init_rng`. 
- low: Lowest (signed) integers to be drawn from the distribution (unless - `high=None`, in which case this parameter is `0` and this value is used for - `high`). - high: If not `None`, one above the largest (signed) integer to be drawn from the - distribution (see above for behavior if `high=None`). - size: The output shape of array. If the given shape is, e.g., `(m, n, k)`, then - `m * n * k` samples are drawn. If `None`, a single value is returned. The - default is `None`. - - Returns: - A `size`-shaped array of random integers. - - See Also: - - :py:meth:`numpy.random.Generator.integers` - """ - size = size or 1 - result: np.ndarray = rng.integers(low=low, high=high, size=size) - - return result diff --git a/examples/task-plugins/dioptra_custom/vc/registry_art.py b/examples/task-plugins/dioptra_custom/vc/registry_art.py deleted file mode 100644 index 7286cf002..000000000 --- a/examples/task-plugins/dioptra_custom/vc/registry_art.py +++ /dev/null @@ -1,107 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""A task plugin module for interfacing the |ART| with the MLFlow model registry. - -.. 
|ART| replace:: `Adversarial Robustness Toolbox\
-    `__
-"""
-
-from __future__ import annotations
-
-from typing import Any, Dict, Optional
-
-import numpy as np
-import structlog
-from structlog.stdlib import BoundLogger
-
-from dioptra import pyplugs
-from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError
-from dioptra.sdk.utilities.decorators import require_package
-
-from .registry_mlflow import load_tensorflow_keras_classifier
-
-LOGGER: BoundLogger = structlog.stdlib.get_logger()
-
-try:
-    from art.estimators.classification import TensorFlowV2Classifier
-
-except ImportError:  # pragma: nocover
-    LOGGER.warn(
-        "Unable to import one or more optional packages, functionality may be reduced",
-        package="art",
-    )
-
-
-try:
-    from tensorflow.keras import losses
-    from tensorflow.keras.models import Sequential
-
-except ImportError:  # pragma: nocover
-    LOGGER.warn(
-        "Unable to import one or more optional packages, functionality may be reduced",
-        package="tensorflow",
-    )
-
-
-@pyplugs.register
-@require_package("art", exc_type=ARTDependencyError)
-@require_package("tensorflow", exc_type=TensorflowDependencyError)
-def load_wrapped_tensorflow_keras_classifier(
-    artifact_uri: str,
-    imagenet_preprocessing: bool = False,
-    classifier_kwargs: Optional[Dict[str, Any]] = None,
-) -> TensorFlowV2Classifier:
-    """Loads and wraps a registered Keras classifier for compatibility with the |ART|.
-
-    Args:
-        artifact_uri: The URI of the logged MLFlow model artifact to load.
-        imagenet_preprocessing: If `True`, wrap the classifier with the ImageNet
-            channel-mean preprocessing values. The default is `False`.
-        classifier_kwargs: A dictionary mapping argument names to values which will
-            be passed to the TensorFlowV2Classifier constructor.
-
-    Returns:
-        A trained :py:class:`~art.estimators.classification.TensorFlowV2Classifier`
-        object.
-
-    See Also:
-        - :py:class:`art.estimators.classification.TensorFlowV2Classifier`
-        - :py:func:`.registry_mlflow.load_tensorflow_keras_classifier`
-    """
-    classifier_kwargs = classifier_kwargs or {}
-    keras_classifier: Sequential = load_tensorflow_keras_classifier(
-        uri=artifact_uri
-    )
-    nb_classes = keras_classifier.output_shape[1]
-    input_shape = keras_classifier.input_shape
-    loss_object = losses.get(keras_classifier.loss)
-    preprocessing = (
-        (np.array([103.939, 116.779, 123.680]), np.array([1.0, 1.0, 1.0]))
-        if imagenet_preprocessing
-        else None
-    )
-    wrapped_keras_classifier: TensorFlowV2Classifier = TensorFlowV2Classifier(
-        model=keras_classifier,
-        nb_classes=nb_classes,
-        input_shape=input_shape,
-        loss_object=loss_object,
-        preprocessing=preprocessing,
-        **classifier_kwargs,
-    )
-    LOGGER.info(
-        "Wrap Keras classifier for compatibility with Adversarial Robustness Toolbox"
-    )
-
-    return wrapped_keras_classifier
diff --git a/examples/task-plugins/dioptra_custom/vc/registry_mlflow.py b/examples/task-plugins/dioptra_custom/vc/registry_mlflow.py
deleted file mode 100644
index 23d8519aa..000000000
--- a/examples/task-plugins/dioptra_custom/vc/registry_mlflow.py
+++ /dev/null
@@ -1,120 +0,0 @@
-# This Software (Dioptra) is being made available as a public service by the
-# National Institute of Standards and Technology (NIST), an Agency of the United
-# States Department of Commerce. This software was developed in part by employees of
-# NIST and in part by NIST contractors. Copyright in portions of this software that
-# were developed by NIST contractors has been licensed or assigned to NIST.
Pursuant
-# to Title 17 United States Code Section 105, works of NIST employees are not
-# subject to copyright protection in the United States. However, NIST may hold
-# international copyright in software created by its employees and domestic
-# copyright (or licensing rights) in portions of software that were assigned or
-# licensed to NIST. To the extent that NIST holds copyright in this software, it is
-# being made available under the Creative Commons Attribution 4.0 International
-# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
-# of the software developed or licensed by NIST.
-#
-# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
-# https://creativecommons.org/licenses/by/4.0/legalcode
-"""A task plugin module for using the MLFlow model registry."""
-
-from __future__ import annotations
-
-from typing import Optional
-
-import mlflow
-import structlog
-from mlflow.entities import Run as MlflowRun
-from mlflow.entities.model_registry import ModelVersion
-from mlflow.tracking import MlflowClient
-from structlog.stdlib import BoundLogger
-
-from dioptra import pyplugs
-from dioptra.sdk.exceptions import TensorflowDependencyError
-from dioptra.sdk.utilities.decorators import require_package
-
-LOGGER: BoundLogger = structlog.stdlib.get_logger()
-
-try:
-    from tensorflow.keras.models import Sequential
-
-except ImportError:  # pragma: nocover
-    LOGGER.warn(
-        "Unable to import one or more optional packages, functionality may be reduced",
-        package="tensorflow",
-    )
-
-
-@pyplugs.register
-def add_model_to_registry(
-    active_run: MlflowRun, name: str, model_dir: str
-) -> Optional[ModelVersion]:
-    """Registers a trained model logged during the current run to the MLFlow registry.
-
-    Args:
-        active_run: The :py:class:`mlflow.ActiveRun` object managing the current run's
-            state.
-        name: The registration name to use for the model.
-        model_dir: The relative artifact directory where MLFlow logged the model
-            trained during the current run.
-
-    Returns:
-        A :py:class:`~mlflow.entities.model_registry.ModelVersion` object created by the
-        backend.
-    """
-    if not name.strip():
-        return None
-
-    run_id: str = active_run.info.run_id
-    artifact_uri: str = active_run.info.artifact_uri
-    source: str = f"{artifact_uri}/{model_dir}"
-
-    registered_models = [x.name for x in MlflowClient().search_registered_models()]
-
-    if name not in registered_models:
-        LOGGER.info("create registered model", name=name)
-        MlflowClient().create_registered_model(name=name)
-
-    LOGGER.info("create model version", name=name, source=source, run_id=run_id)
-    model_version: ModelVersion = MlflowClient().create_model_version(
-        name=name, source=source, run_id=run_id
-    )
-
-    return model_version
-
-
-@pyplugs.register
-def get_experiment_name(active_run: MlflowRun) -> str:
-    """Gets the name of the experiment for the current run.
-
-    Args:
-        active_run: The :py:class:`mlflow.ActiveRun` object managing the current run's
-            state.
-
-    Returns:
-        The name of the experiment.
-    """
-    experiment_name: str = (
-        MlflowClient().get_experiment(active_run.info.experiment_id).name
-    )
-    LOGGER.info(
-        "Obtained experiment name of active run", experiment_name=experiment_name
-    )
-
-    return experiment_name
-
-
-@pyplugs.register
-@require_package("tensorflow", exc_type=TensorflowDependencyError)
-def load_tensorflow_keras_classifier(uri: str) -> Sequential:
-    """Loads a registered Keras classifier.
-
-    Args:
-        uri: The MLFlow artifact URI of the logged model to load.
-
-    Returns:
-        A trained :py:class:`tf.keras.Sequential` object.
-    """
-    LOGGER.info("Load Keras classifier from model registry", uri=uri)
-
-    return mlflow.keras.load_model(model_uri=uri)
\ No newline at end of file
diff --git a/examples/task-plugins/dioptra_custom/vc/tensorflow.py b/examples/task-plugins/dioptra_custom/vc/tensorflow.py
deleted file mode 100644
index 1d640e2c1..000000000
--- a/examples/task-plugins/dioptra_custom/vc/tensorflow.py
+++ /dev/null
@@ -1,112 +0,0 @@
-# This Software (Dioptra) is being made available as a public service by the
-# National Institute of Standards and Technology (NIST), an Agency of the United
-# States Department of Commerce. This software was developed in part by employees of
-# NIST and in part by NIST contractors. Copyright in portions of this software that
-# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
-# to Title 17 United States Code Section 105, works of NIST employees are not
-# subject to copyright protection in the United States. However, NIST may hold
-# international copyright in software created by its employees and domestic
-# copyright (or licensing rights) in portions of software that were assigned or
-# licensed to NIST. To the extent that NIST holds copyright in this software, it is
-# being made available under the Creative Commons Attribution 4.0 International
-# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
-# of the software developed or licensed by NIST.
-#
-# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
-# https://creativecommons.org/licenses/by/4.0/legalcode
-"""A task plugin module of helper tasks for Tensorflow."""
-
-from __future__ import annotations
-
-from types import FunctionType
-from typing import Any, Dict, List, Union
-
-import structlog
-from structlog.stdlib import BoundLogger
-
-from dioptra import pyplugs
-from dioptra.sdk.exceptions import TensorflowDependencyError
-from dioptra.sdk.utilities.decorators import require_package
-
-from . import import_keras
-
-LOGGER: BoundLogger = structlog.stdlib.get_logger()
-
-try:
-    from tensorflow.keras.callbacks import Callback
-    from tensorflow.keras.metrics import Metric
-    from tensorflow.keras.optimizers import Optimizer
-
-except ImportError:  # pragma: nocover
-    LOGGER.warn(
-        "Unable to import one or more optional packages, functionality may be reduced",
-        package="tensorflow",
-    )
-
-
-@pyplugs.register
-@require_package("tensorflow", exc_type=TensorflowDependencyError)
-def evaluate_metrics_tensorflow(classifier, dataset) -> Dict[str, float]:
-    result = classifier.evaluate(dataset, verbose=0)
-    return dict(zip(classifier.metrics_names, result))
-
-
-@pyplugs.register
-@require_package("tensorflow", exc_type=TensorflowDependencyError)
-def get_optimizer(optimizer: str, learning_rate: float) -> Optimizer:
-    return import_keras.get_optimizer(optimizer)(learning_rate)
-
-
-@pyplugs.register
-@require_package("tensorflow", exc_type=TensorflowDependencyError)
-def get_model_callbacks(callbacks_list: List[Dict[str, Any]]) -> List[Callback]:
-    return [
-        import_keras.get_callback(callback["name"])(**callback.get("parameters", {}))
-        for callback in callbacks_list
-    ]
-
-
-@pyplugs.register
-@require_package("tensorflow", exc_type=TensorflowDependencyError)
-def get_performance_metrics(
-    metrics_list: List[Dict[str, Any]]
-) -> List[Union[Metric, FunctionType]]:
-    performance_metrics: List[Metric] = []
-
-    for metric in metrics_list:
-        new_metric: Union[Metric, FunctionType] = import_keras.get_metric(
-            metric["name"]
-        )
-        performance_metrics.append(
-            new_metric(**metric.get("parameters"))
-            if not isinstance(new_metric, FunctionType) and metric.get("parameters")
-            else new_metric
-        )
-
-    return performance_metrics
-
-
-@pyplugs.register
-def process_int_list(arg: str):
-    # Parse a string like "[28, 28, 1]" into a list of ints.
-    stripped = arg.replace("[", "").replace("]", "").replace(" ", "")
-    return [int(x) for x in stripped.split(",")]
-
-
-@pyplugs.register
-def process_float_list(arg: str):
-    # Parse a string like "[0, 1]" into a list of floats.
-    stripped = arg.replace("[", "").replace("]", "").replace(" ", "")
-    return [float(x) for x in stripped.split(",")]
-
-
-@pyplugs.register
-def process_float(arg: str):
-    return float(arg)
-
-
-@pyplugs.register
-def process_int(arg: str):
-    return int(arg)
-
-
-@pyplugs.register
-def process_bool(arg: str):
-    # bool("False") evaluates to True, so compare the normalized string instead.
-    return arg.strip().lower() == "true"
-
-
-@pyplugs.register
-def get_none(arg: str):
-    # Always yields None; used to inject a null value into a task graph.
-    return None
\ No newline at end of file
diff --git a/examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py b/examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py
deleted file mode 100644
index f7f878cd5..000000000
--- a/examples/task-plugins/dioptra_custom/vc/tracking_mlflow.py
+++ /dev/null
@@ -1,99 +0,0 @@
-# This Software (Dioptra) is being made available as a public service by the
-# National Institute of Standards and Technology (NIST), an Agency of the United
-# States Department of Commerce. This software was developed in part by employees of
-# NIST and in part by NIST contractors. Copyright in portions of this software that
-# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
-# to Title 17 United States Code Section 105, works of NIST employees are not
-# subject to copyright protection in the United States. However, NIST may hold
-# international copyright in software created by its employees and domestic
-# copyright (or licensing rights) in portions of software that were assigned or
-# licensed to NIST.
To the extent that NIST holds copyright in this software, it is
-# being made available under the Creative Commons Attribution 4.0 International
-# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
-# of the software developed or licensed by NIST.
-#
-# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
-# https://creativecommons.org/licenses/by/4.0/legalcode
-"""A task plugin module for using the MLFlow Tracking service."""
-
-from __future__ import annotations
-
-from typing import Dict
-
-import mlflow
-import structlog
-from structlog.stdlib import BoundLogger
-
-from dioptra import pyplugs
-from dioptra.sdk.exceptions import TensorflowDependencyError
-from dioptra.sdk.utilities.decorators import require_package
-
-LOGGER: BoundLogger = structlog.stdlib.get_logger()
-
-try:
-    from tensorflow.keras.models import Sequential
-
-except ImportError:  # pragma: nocover
-    LOGGER.warn(
-        "Unable to import one or more optional packages, functionality may be reduced",
-        package="tensorflow",
-    )
-
-
-@pyplugs.register
-def log_metrics(metrics: Dict[str, float]) -> None:
-    """Logs metrics to the MLFlow Tracking service for the current run.
-
-    Args:
-        metrics: A dictionary with the metrics to be logged. The keys are the metric
-            names and the values are the metric values.
-
-    See Also:
-        - :py:func:`mlflow.log_metric`
-    """
-    for metric_name, metric_value in metrics.items():
-        mlflow.log_metric(key=metric_name, value=metric_value)
-        LOGGER.info(
-            "Log metric to MLFlow Tracking server",
-            metric_name=metric_name,
-            metric_value=metric_value,
-        )
-
-
-@pyplugs.register
-def log_parameters(parameters: Dict[str, float]) -> None:
-    """Logs parameters to the MLFlow Tracking service for the current run.
-
-    Parameters can only be set once per run.
-
-    Args:
-        parameters: A dictionary with the parameters to be logged. The keys are the
-            parameter names and the values are the parameter values.
-
-    See Also:
-        - :py:func:`mlflow.log_param`
-    """
-    for parameter_name, parameter_value in parameters.items():
-        mlflow.log_param(key=parameter_name, value=parameter_value)
-        LOGGER.info(
-            "Log parameter to MLFlow Tracking server",
-            parameter_name=parameter_name,
-            parameter_value=parameter_value,
-        )
-
-
-@pyplugs.register
-@require_package("tensorflow", exc_type=TensorflowDependencyError)
-def log_tensorflow_keras_estimator(estimator: Sequential, model_dir: str) -> None:
-    """Logs a Keras estimator trained during the current run to the MLFlow registry.
-
-    Args:
-        estimator: A trained Keras estimator.
-        model_dir: The relative artifact directory where MLFlow should save the
-            model.
-    """
-    mlflow.keras.log_model(model=estimator, artifact_path=model_dir)
-    LOGGER.info(
-        "Tensorflow Keras model logged to tracking server",
-        model_dir=model_dir,
-    )
diff --git a/examples/v1-client-tensorflow-mnist-classifier/README.md b/examples/v1-client-tensorflow-mnist-classifier/README.md
deleted file mode 100644
index 55340bbdc..000000000
--- a/examples/v1-client-tensorflow-mnist-classifier/README.md
+++ /dev/null
@@ -1,22 +0,0 @@
-# Tensorflow MNIST Classifier demo
-
-This example demonstrates how to run a simple experiment that trains a neural network classifier on MNIST and then attacks it with the fast gradient method (FGM) evasion attack.
-The demo can be found in the Jupyter notebook file [demo.ipynb](demo.ipynb).
-
-## Running the example
-
-To prepare your environment for running this example, follow the linked instructions below:
-
-1. 
[Create and activate a Python virtual environment and install the necessary dependencies](../README.md#creating-a-virtual-environment)
-2. [Download the MNIST dataset using the download_data.py script.](../README.md#downloading-datasets)
-3. [Follow the links in these User Setup instructions](../../README.md#user-setup) to do the following:
-    - Build the containers
-    - Use the cookiecutter template to generate the scripts, configuration files, and Docker Compose files you will need to run Dioptra
-4. [Edit the docker-compose.yml file to mount the data folder in the worker containers](../README.md#mounting-the-data-folder-in-the-worker-containers)
-5. [Initialize and start Dioptra](https://pages.nist.gov/dioptra/getting-started/running-dioptra.html#initializing-the-deployment)
-6. [Register the custom task plugins for Dioptra's examples and demos](../README.md#registering-custom-task-plugins)
-7. [Register the queues for Dioptra's examples and demos](../README.md#registering-queues)
-8. [Start JupyterLab and open `demo.ipynb`](../README.md#starting-jupyter-lab)
-
-Steps 1–4 and 6–7 only need to be run once.
-**Returning users only need to repeat Steps 5 (if you stopped Dioptra using `docker compose down`) and 8 (if you stopped the `jupyter lab` process)**.
diff --git a/examples/v1-client-tensorflow-mnist-classifier/demo.ipynb b/examples/v1-client-tensorflow-mnist-classifier/demo.ipynb
deleted file mode 100644
index 65b606d50..000000000
--- a/examples/v1-client-tensorflow-mnist-classifier/demo.ipynb
+++ /dev/null
@@ -1,669 +0,0 @@
-{
- "cells": [
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "# Tensorflow MNIST Classifier demo"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "This notebook contains an end-to-end demonstration of Dioptra that can be run on any modern laptop.\n",
-    "Please see the [example README](README.md) for instructions on how to prepare your environment for running this example."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Setup"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Below we set a few experiment constants, import the necessary Python modules, and ensure the proper environment variables are set so that all the code blocks will work as expected."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "EXPERIMENT_NAME = \"mnist_fgm\"\n",
-    "EXPERIMENT_DESC = \"applying the fast gradient sign (FGM) attack to a classifier trained on MNIST\"\n",
-    "QUEUE_NAME = 'tensorflow_cpu'\n",
-    "QUEUE_DESC = 'Tensorflow CPU Queue'\n",
-    "PLUGIN_FILES = '../task-plugins/dioptra_custom/vc/'\n",
-    "MODEL_NAME = \"mnist_classifier\"\n",
-    "\n",
-    "# Default address for accessing the RESTful API service\n",
-    "RESTAPI_ADDRESS = \"http://localhost:20080\"\n",
-    "\n",
-    "# Default address for accessing the MLFlow Tracking server\n",
-    "MLFLOW_TRACKING_URI = \"http://localhost:35000\""
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Import packages from the Python standard library\n",
-    "import importlib.util\n",
-    "import os\n",
-    "import sys\n",
-    "import pprint\n",
-    "import time\n",
-    "import warnings\n",
-    "from pathlib import Path\n",
-    "from IPython.display import display, clear_output\n",
-    "import logging\n",
-    "import structlog\n",
-    "import yaml\n",
-    "\n",
-    "# Filter out warning messages\n",
-    "warnings.filterwarnings(\"ignore\")\n",
-    "structlog.configure(\n",
-    "    wrapper_class=structlog.make_filtering_bound_logger(logging.CRITICAL),\n",
-    ")\n",
-    "\n",
-    "def register_python_source_file(module_name: str, filepath: Path) -> None:\n",
-    "    \"\"\"Import a source file directly.\n",
-    "\n",
-    "    Args:\n",
-    "        module_name: The module name to associate with the imported source file.\n",
-    "        filepath: The path to the source file.\n",
-    "\n",
-    "    Notes:\n",
-    "        Adapted from the following implementation in the Python documentation:\n",
-    "        https://docs.python.org/3/library/importlib.html#importing-a-source-file-directly\n",
-    "    \"\"\"\n",
-    "    spec = importlib.util.spec_from_file_location(module_name, str(filepath))\n",
-    "    module = importlib.util.module_from_spec(spec)\n",
-    "    sys.modules[module_name] = module\n",
-    "    spec.loader.exec_module(module)\n",
-    "\n",
-    "# Register the examples/scripts directory as a Python module\n",
-    "register_python_source_file(\"scripts\", Path(\"..\", \"scripts\", \"__init__.py\"))\n",
-    "\n",
-    "from scripts.client import DioptraClient\n",
-    "from scripts.utils import make_tar\n",
-    "\n",
-    "# Set DIOPTRA_RESTAPI_URI variable if not defined, used to connect to RESTful API service\n",
-    "if os.getenv(\"DIOPTRA_RESTAPI_URI\") is None:\n",
-    "    os.environ[\"DIOPTRA_RESTAPI_URI\"] = RESTAPI_ADDRESS\n",
-    "\n",
-    "# Set MLFLOW_TRACKING_URI variable, used to connect to MLFlow Tracking service\n",
-    "if os.getenv(\"MLFLOW_TRACKING_URI\") is None:\n",
-    "    os.environ[\"MLFLOW_TRACKING_URI\"] = MLFLOW_TRACKING_URI"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Dataset"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "We obtained a copy of the MNIST dataset when we ran the `download_data.py` script. 
If you have not done so already, see [How to Obtain Common Datasets](https://pages.nist.gov/dioptra/getting-started/acquiring-datasets.html).\n",
-    "The training and testing images for the MNIST dataset are stored within the `/dioptra/data/Mnist` directory as PNG files that are organized into the following folder structure:"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "    Mnist\n",
-    "    ├── testing\n",
-    "    │   ├── 0\n",
-    "    │   ├── 1\n",
-    "    │   ├── 2\n",
-    "    │   ├── 3\n",
-    "    │   ├── 4\n",
-    "    │   ├── 5\n",
-    "    │   ├── 6\n",
-    "    │   ├── 7\n",
-    "    │   ├── 8\n",
-    "    │   └── 9\n",
-    "    └── training\n",
-    "        ├── 0\n",
-    "        ├── 1\n",
-    "        ├── 2\n",
-    "        ├── 3\n",
-    "        ├── 4\n",
-    "        ├── 5\n",
-    "        ├── 6\n",
-    "        ├── 7\n",
-    "        ├── 8\n",
-    "        └── 9"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The subfolders under `training/` and `testing/` are the classification labels for the images in the dataset.\n",
-    "This folder structure is a standardized way to encode the label information and many libraries can make use of it, including the Tensorflow library that we are using for this particular demo."
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "## Submit and run jobs"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "To connect with the endpoint, we will use a client class defined in the `examples/scripts/client.py` file that is able to connect with the Dioptra RESTful API using the HTTP protocol.\n",
-    "We connect using the client below.\n",
-    "The client uses the environment variable `DIOPTRA_RESTAPI_URI`, which we configured at the top of the notebook, to figure out how to connect to the Dioptra RESTful API."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "tags": []
-   },
-   "outputs": [],
-   "source": [
-    "client = DioptraClient()"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "It is necessary to log in to the REST API before performing any operations. Here we create a user if one does not already exist, and then log in as that user."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "try:\n",
-    "    client.users.create('pluginuser','pluginuser@dioptra.nccoe.nist.gov','pleasemakesuretoPLUGINthecomputer','pleasemakesuretoPLUGINthecomputer')\n",
-    "except Exception:\n",
-    "    pass  # ignore the error if the user already exists\n",
-    "client.auth.login('pluginuser','pleasemakesuretoPLUGINthecomputer')"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "The following function can be used to clear all experiments, entrypoints, jobs, models, plugins, tags, and queues in the database if a fresh start is desired. It is not currently used anywhere in this notebook, but is included for utility."
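-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "If you do want a fresh start, the cell below is a minimal usage sketch for the `delete_all` helper defined in the cell that follows it. Clearing the database is irreversible, so the call ships commented out; run the definition cell first, then uncomment and run the sketch."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "# Minimal usage sketch for the delete_all() helper defined in the next cell.\n",
-    "# Uncomment only if you really want to wipe every experiment, entrypoint, job,\n",
-    "# model, plugin, tag, plugin parameter type, and queue from the deployment.\n",
-    "# delete_all()"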
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def delete_all():\n", - " for d in client.experiments.get_all(pageLength=100000)['data']:\n", - " client.experiments.delete_by_id(d['id'])\n", - " for d in client.entrypoints.get_all(pageLength=100000)['data']:\n", - " client.entrypoints.delete_by_id(d['id'])\n", - " for d in client.jobs.get_all(pageLength=100000)['data']:\n", - " client.jobs.delete_by_id(d['id'])\n", - " for d in client.models.get_all(pageLength=100000)['data']:\n", - " client.models.delete_by_id(d['id'])\n", - " for d in client.plugins.get_all(pageLength=100000)['data']:\n", - " try:\n", - " client.plugins.delete_by_id(d['id'])\n", - " except:\n", - " pass\n", - " for d in client.tags.get_all(pageLength=100000)['data']:\n", - " client.tags.delete_by_id(d['id'])\n", - " for d in client.pluginParameterTypes.get_all(pageLength=100000)['data']:\n", - " try:\n", - " client.pluginParameterTypes.delete_by_id(d['id'])\n", - " except:\n", - " pass\n", - " for d in client.queues.get_all(pageLength=100000)['data']:\n", - " client.queues.delete_by_id(d['id'])" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The following functions are used for registering plugins located in the `../examples/task-plugins/` folder, associating them with endpoints in the ./src/ folder, and then associating those endpoints with an experiment. When `run_experiment` is called, it will create plugins based on the YML files provided, and upload any additional files in the directory specified by `PLUGIN_FILES` at the top of the notebook." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": { - "scrolled": true, - "tags": [] - }, - "outputs": [], - "source": [ - "basic_types = ['integer', 'string', 'number', 'any', 'boolean', 'null']\n", - "\n", - "def create_or_get_experiment(group, name, description, entrypoints):\n", - " found = None\n", - " for exp in client.experiments.get_all(search=name,pageLength=100000)['data']:\n", - " if exp['name'] == name:\n", - " found = exp\n", - " if (found != None):\n", - " client.experiments.modify_by_id(found['id'], name, description, entrypoints)\n", - " return found\n", - " else:\n", - " return client.experiments.create(group, name, description, entrypoints)\n", - "def create_or_get_entrypoints(group, name, description, taskGraph, parameters, queues, plugins):\n", - " found = None\n", - " for entrypoint in client.entrypoints.get_all(search=name,pageLength=100000)['data']:\n", - " if entrypoint['name'] == name:\n", - " found = entrypoint\n", - " if (found != None):\n", - " client.entrypoints.modify_by_id(found['id'], name, description, taskGraph, parameters, queues)\n", - " client.entrypoints.add_plugins_by_entrypoint_id(found['id'], plugins)\n", - " return found\n", - " else:\n", - " return client.entrypoints.create(group, name, description, taskGraph, parameters, queues, plugins)\n", - "def create_or_get_plugin_type(group, name, description, structure):\n", - " ret = None\n", - " for pt in client.pluginParameterTypes.get_all(pageLength=100000)['data']:\n", - " if (pt['name'] == name):\n", - " ret = pt\n", - " if (ret is None):\n", - " ret = client.pluginParameterTypes.create(group, name, description, structure)\n", - " return ret\n", - "def find_plugin_type(name, types):\n", - " for t in types.keys():\n", - " if t == name:\n", - " return create_or_get_plugin_type(1, name, name, types[t])['id']\n", - " for t in basic_types:\n", - " if t == name:\n", - " 
return create_or_get_plugin_type(1, name, 'primitive', {})['id']\n", - "\n", - " print(\"Couldn't find type\", name, \"in types definition.\")\n", - "\n", - "def create_or_get_queue(group, name, description):\n", - " ret = None\n", - " for queue in client.queues.get_all(pageLength=100000)['data']:\n", - " if queue['name'] == name:\n", - " ret = queue\n", - " if (ret is None):\n", - " ret = client.queues.create(group, name, description)\n", - " return ret\n", - "def plugin_to_py(plugin):\n", - " return '../task-plugins/' + '/'.join(plugin.split('.')[:-1]) + '.py'\n", - "def create_inputParam_object(inputs, types):\n", - " ret = []\n", - " for inp in inputs:\n", - " if 'name' in inp:\n", - " inp_name = inp['name']\n", - " inp_type = inp['type']\n", - " else:\n", - " inp_name = list(inp.keys())[0]\n", - " inp_type = inp[inp_name]\n", - " if 'required' in inp:\n", - " inp_req = inp['required']\n", - " else:\n", - " inp_req = True\n", - " inp_type = find_plugin_type(inp_type, types)\n", - " ret += [{\n", - " 'name': inp_name,\n", - " 'parameterType': inp_type,\n", - " 'required': inp_req\n", - " }]\n", - " return ret\n", - "def create_outputParam_object(outputs, types):\n", - " ret = []\n", - " for outp in outputs:\n", - " if isinstance(outp, dict):\n", - " outp_name = list(outp.keys())[0]\n", - " outp_type = outp[outp_name]\n", - " else:\n", - " outp_name = outp\n", - " outp_type = outputs[outp_name]\n", - " outp_type = find_plugin_type(outp_type, types)\n", - " ret += [{\n", - " 'name': outp_name,\n", - " 'parameterType': outp_type,\n", - " }]\n", - " return ret\n", - "\n", - "def read_yaml(filename):\n", - " with open(filename) as stream:\n", - " try:\n", - " ret = yaml.safe_load(stream)\n", - " except yaml.YAMLError as exc:\n", - " print(exc)\n", - " return ret\n", - "def register_basic_types(declared):\n", - " for q in basic_types:\n", - " type_def = create_or_get_plugin_type(1, q, 'primitive', {})\n", - " for q in declared:\n", - " type_def = create_or_get_plugin_type(1, q, 'declared', declared[q])\n", - "def get_plugins_to_register(yaml_file, plugins_to_upload=None):\n", - " plugins_to_upload = {} if plugins_to_upload is None else plugins_to_upload\n", - " yaml = read_yaml(yaml_file)\n", - " task_graph = yaml['graph']\n", - " plugins = yaml['tasks']\n", - " types = yaml['types']\n", - " \n", - " register_basic_types(types)\n", - " tasks = []\n", - " for plugin in plugins:\n", - " name = plugin\n", - " definition = plugins[plugin]\n", - " python_file = plugin_to_py(definition['plugin'])\n", - " upload = {}\n", - " upload['name'] = name\n", - " if 'inputs' in definition:\n", - " inputs = definition['inputs']\n", - " upload['inputParams'] = create_inputParam_object(inputs, types)\n", - " else:\n", - " upload['inputParams'] = []\n", - " if 'outputs' in definition:\n", - " outputs = definition['outputs']\n", - " upload['outputParams'] = create_outputParam_object(outputs, types) \n", - " else:\n", - " upload['outputParams'] = []\n", - " if (python_file in plugins_to_upload):\n", - " plugins_to_upload[python_file] += [upload]\n", - " else:\n", - " plugins_to_upload[python_file] = [upload]\n", - " return plugins_to_upload\n", - "def create_or_get_plugin(group, name, description):\n", - " ret = None\n", - " for plugin in client.plugins.get_all(search=name,pageLength=100000)['data']:\n", - " if plugin['name'] == name:\n", - " ret = plugin\n", - " if (ret is None):\n", - " ret = client.plugins.create(group, name, description)\n", - " return ret\n", - "def create_or_modify_plugin_file(plugin_id, 
filename, contents, description, tasks):\n", - " found = None\n", - " for plugin_file in client.plugins.files.get_files_by_plugin_id(plugin_id, pageLength=100000)['data']:\n", - " if plugin_file['filename'] == filename:\n", - " found = plugin_file\n", - " if (found != None):\n", - " return client.plugins.files.modify_files_by_plugin_id_file_id(plugin_id, found['id'], filename, contents, description, tasks)\n", - " else:\n", - " return client.plugins.files.create_files_by_plugin_id(plugin_id, filename, contents, description, tasks)\n", - "def register_plugins(group, plugins_to_upload):\n", - " plugins = []\n", - " for plugin_file in plugins_to_upload.keys():\n", - " plugin_path = Path(plugin_file)\n", - " contents = plugin_path.read_text().replace(\"\\r\", '')\n", - " tasks = plugins_to_upload[plugin_file]\n", - " filename = plugin_path.name\n", - " description = 'custom plugin for ' + filename\n", - " plugin_id = create_or_get_plugin(group, plugin_path.parent.name, description)['id']\n", - " plugins += [plugin_id]\n", - " uploaded_file = create_or_modify_plugin_file(plugin_id, filename, contents, description, tasks)\n", - " return list(set(plugins))\n", - "def create_parameters_object(params, modify):\n", - " ret = []\n", - " type_map = {'int': 'float', 'float':'float', 'string':'string'}\n", - " for p in params:\n", - " if (type(params[p]).__name__ in type_map.keys()):\n", - " paramType = type_map[type(params[p]).__name__]\n", - " paramType='string' # TODO: remove if backend can handle types correctly\n", - " defaultValue = str(params[p])\n", - " else:\n", - " defaultValue = str(params[p])\n", - " paramType = 'string'\n", - "\n", - " if p in modify.keys():\n", - " defaultValue = str(modify[p])\n", - " name = p\n", - " param_obj = {\n", - " 'name': name,\n", - " 'defaultValue': str(defaultValue),\n", - " 'parameterType': paramType\n", - " }\n", - " ret += [param_obj]\n", - " return ret\n", - "def get_graph_for_upload(yaml_text):\n", - " i = 0\n", - " for line in yaml_text:\n", - " if line.startswith(\"graph:\"):\n", - " break\n", - " i += 1\n", - " return ''.join(yaml_text[i+1:])\n", - "def get_parameters_for_upload(yaml_text):\n", - " i = 0\n", - " for line in yaml_text:\n", - " if line.startswith(\"parameters:\"):\n", - " start = i\n", - " if line.startswith(\"tasks:\"):\n", - " break\n", - " i += 1\n", - " return yaml_text[start:i+1]\n", - "def register_entrypoint(group, name, description, queues, plugins, yaml_file, modify_params=None):\n", - " modify_params = {} if modify_params is None else modify_params\n", - " yaml = read_yaml(yaml_file)\n", - " #task_graph = yaml['graph']\n", - " parameters = yaml['parameters']\n", - " \n", - " with open(yaml_file, 'r') as f:\n", - " lines = f.readlines()\n", - " task_graph = get_graph_for_upload(lines).replace('\\r','')\n", - " \n", - " entrypoint = create_or_get_entrypoints(1, name, description, task_graph, create_parameters_object(parameters, modify_params), queues, plugins)\n", - " return entrypoint\n", - "def add_missing_plugin_files(location, upload):\n", - " p = Path(location)\n", - " for child in p.iterdir():\n", - " if (child.name.endswith('.py')):\n", - " if (str(child) not in upload.keys()):\n", - " upload[str(child)] = []\n", - " return upload" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "`run_experiment` uses the helper functions above to do the following tasks:\n", - " - create a queue specified by `QUEUE_NAME` if needed\n", - " - upload the plugins used by the specified `entrypoint` \n", - " - upload 
any other plugin files in the directory `PLUGIN_FILES`\n",
-    " - register the entrypoint in Dioptra\n",
-    " - create the experiment (if needed) and associate the entrypoint with the experiment\n",
-    " - start a job for the specified `entrypoint` on the queue `QUEUE_NAME`\n",
-    "\n",
-    "Note that any parameters passed in to `parameters` will overwrite the defaults in the specified YML file."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def run_experiment(entrypoint, entrypoint_name, entrypoint_desc, job_time_limit, parameters=None):\n",
-    "    parameters = {} if parameters is None else parameters\n",
-    "    upload = get_plugins_to_register(entrypoint, {})\n",
-    "    upload = add_missing_plugin_files(PLUGIN_FILES, upload)\n",
-    "    queue = create_or_get_queue(1, QUEUE_NAME, QUEUE_DESC)\n",
-    "    queues = [queue['id']]\n",
-    "    plugins = register_plugins(1, upload)\n",
-    "    entrypoint = register_entrypoint(1, entrypoint_name, entrypoint_desc, queues, plugins, entrypoint, parameters)\n",
-    "    experiment = create_or_get_experiment(1, EXPERIMENT_NAME, EXPERIMENT_DESC, [entrypoint['id']])\n",
-    "    return client.experiments.create_jobs_by_experiment_id(experiment['id'], entrypoint_desc, queue['id'], entrypoint['id'], {}, job_time_limit)"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "`wait_for_job` blocks until the given job has finished, which is useful for jobs that depend on the output of other jobs."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {},
-   "outputs": [],
-   "source": [
-    "def wait_for_job(job, job_name):\n",
-    "    n = 0\n",
-    "    while job['status'] != 'finished':\n",
-    "        job = client.jobs.get_by_id(job['id'])\n",
-    "        time.sleep(1)\n",
-    "        clear_output(wait=True)\n",
-    "        display(\"Waiting for job.\" + \".\" * (n % 3))\n",
-    "        n += 1\n",
-    "    clear_output(wait=True)\n",
-    "    display(f\"Job finished. Starting {job_name} job.\")"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Next, we need to train our model. This particular entrypoint uses a LeNet-5 model.\n",
-    "Depending on the specs of your computer, it can take 5-20 minutes or longer to complete.\n",
-    "If you are fortunate enough to have access to a dedicated GPU, then the training time will be much shorter."
-   ]
-  },
-  {
-   "cell_type": "code",
-   "execution_count": null,
-   "metadata": {
-    "scrolled": true
-   },
-   "outputs": [],
-   "source": [
-    "entrypoint = 'src/train.yml'\n",
-    "entrypoint_name = 'train'\n",
-    "entrypoint_desc = 'training a classifier on MNIST'\n",
-    "job_time_limit = '1h'\n",
-    "\n",
-    "training_job = run_experiment(entrypoint,\n",
-    "                              entrypoint_name,\n",
-    "                              entrypoint_desc,\n",
-    "                              job_time_limit,\n",
-    "                              {\"epochs_p\": 1})\n"
-   ]
-  },
-  {
-   "cell_type": "markdown",
-   "metadata": {},
-   "source": [
-    "Now that we have trained a model, we will next apply the fast gradient method (FGM) evasion attack on it to generate adversarial images.\n",
-    "\n",
-    "This specific workflow is an example of jobs that contain dependencies: the metric evaluation job cannot start until the adversarial image generation job has completed, and the adversarial image generation job cannot start until the training job has completed.\n",
-    "\n",
-    "Note that the `training_job` id is needed to tell the FGM attack which model to generate examples against."
- ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "entrypoint = 'src/fgm.yml'\n", - "entrypoint_name = 'fgm'\n", - "entrypoint_desc = 'generating examples on mnist_classifier using the fgm attack'\n", - "job_time_limit = '1h'\n", - "\n", - "wait_for_job(training_job, entrypoint_name)\n", - "fgm_job = run_experiment(entrypoint,\n", - " entrypoint_name,\n", - " entrypoint_desc,\n", - " job_time_limit,\n", - " {\"training_job_id\": training_job['id']})\n" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, we can test out the results of our adversarial attack on the model we trained earlier. This will wait for the FGM job to finish, and then evaluate the model's performance on the adversarial examples. Note that we need to know both the `fgm_job` id as well as the `training_job` id, so that this entrypoint knows which run's adversarial examples to test against which model. \n", - "\n", - "The previous runs are all stored in Dioptra as well, so you can always go back later and retrieve examples, models, and even the code used to create them." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "entrypoint = 'src/infer.yml'\n", - "entrypoint_name = 'infer'\n", - "entrypoint_desc = 'evaluating performance of mnist_classifier on generated fgm examples'\n", - "job_time_limit = '1h'\n", - "\n", - "wait_for_job(fgm_job, entrypoint_name)\n", - "infer_job = run_experiment(entrypoint, \n", - " entrypoint_name,\n", - " entrypoint_desc,\n", - " job_time_limit,\n", - " {\"fgm_job_id\": fgm_job['id'], \"training_job_id\": training_job['id']})\n" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "from mlflow.tracking import MlflowClient\n", - "from uuid import UUID\n", - "mlflow_client = MlflowClient()\n", - "mlflow_runid = UUID(client.jobs.get_mlflow_run_id(infer_job['id'])['mlflowRunId']).hex\n", - "mlflow_run = mlflow_client.get_run(mlflow_runid)\n", - "pprint.pprint(mlflow_run.data.metrics)" - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [] - } - ], - "metadata": { - "interpreter": { - "hash": "edee40310913f16e2ca02c1d37887bcb7f07f00399ca119bb7e27de7d632ea99" - }, - "kernelspec": { - "display_name": "Python 3 (ipykernel)", - "language": "python", - "name": "python3" - }, - "language_info": { - "codemirror_mode": { - "name": "ipython", - "version": 3 - }, - "file_extension": ".py", - "mimetype": "text/x-python", - "name": "python", - "nbconvert_exporter": "python", - "pygments_lexer": "ipython3", - "version": "3.10.13" - } - }, - "nbformat": 4, - "nbformat_minor": 4 -} diff --git a/examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml b/examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml deleted file mode 100644 index 8d86b3ded..000000000 --- a/examples/v1-client-tensorflow-mnist-classifier/src/fgm.yml +++ /dev/null @@ -1,320 +0,0 @@ -types: - rng: - path: - path_string: - union: [string, any] - path_string_null: - union: [path_string, "null"] - dirs: - list: path_string - parameters: - mapping: [string, number] - kwargs: - mapping: [string, any] - kwargs_null: - union: [kwargs, "null"] - keras_classifier: - distance_metric_request: - mapping: [string, string] - distance_metrics_requests: - list: - mapping: [string, string] - distance_metric: - tuple: [string, any] - distance_metrics: - list: - tuple: [string, 
any] - distance_metrics_null: - union: - - list: - tuple: [string, any] - - "null" - dataframe: - image_size: - tuple: [integer, integer, integer] - clip_values: - tuple: [float, float, float] - norm: - union: [integer, number, string] - -parameters: - data_dir: /dioptra/data/Mnist/testing - image_size_p: [28, 28, 1] - adv_tar_name: testing_adversarial_fgm.tar.gz - adv_data_dir: adv_testing - distance_metrics_filename: distance_metrics.csv - training_job_id: - clip_values_p: [0, 1] - batch_size_p: 32 - eps_p: 0.3 - eps_step_p: 0.1 - minimal_p: false - norm: "inf" - seed_p: -1 - -tasks: - init_rng: - plugin: dioptra_custom.vc.random_rng.init_rng - inputs: - - name: seed - type: integer - required: false - outputs: - - ret1: integer - - ret2: rng - - draw_random_integer: - plugin: dioptra_custom.vc.random_sample.draw_random_integer - inputs: - - rng: rng - - name: low - type: integer - required: false - - name: high - type: integer - required: false - outputs: - value: integer - - init_tensorflow: - plugin: dioptra_custom.vc.backend_configs_tensorflow.init_tensorflow - inputs: - - seed: integer - - make_directories: - plugin: dioptra_custom.vc.artifacts_utils.make_directories - inputs: - - dirs: dirs - - log_parameters: - plugin: dioptra_custom.vc.tracking_mlflow.log_parameters - inputs: - - parameters: parameters - - load_wrapped_tensorflow_keras_classifier: - plugin: dioptra_custom.vc.registry_art.load_wrapped_tensorflow_keras_classifier - inputs: - - artifact_uri: string - - name: classifier_kwargs - type: kwargs - required: false - outputs: - classifier: keras_classifier - - get_distance_metric_list: - plugin: dioptra_custom.vc.metrics_distance.get_distance_metric_list - inputs: - - request: distance_metrics_requests - outputs: - distance_metrics_list: distance_metrics - - create_adversarial_fgm_dataset: - plugin: dioptra_custom.vc.attacks_fgm.create_adversarial_fgm_dataset - inputs: - - data_dir: string - - adv_data_dir: path_string - - keras_classifier: keras_classifier - - image_size: image_size - - name: distance_metrics_list - type: distance_metrics_null - required: false - - name: rescale - type: number - required: false - - name: batch_size - type: integer - required: false - - name: label_mode - type: string - required: false - - name: eps - type: number - required: false - - name: eps_step - type: number - required: false - - name: minimal - type: boolean - required: false - - name: norm - type: norm - required: false - outputs: - dataset: dataframe - - upload_directory_as_tarball_artifact: - plugin: dioptra_custom.vc.artifacts_mlflow.upload_directory_as_tarball_artifact - inputs: - - source_dir: path_string - - tarball_filename: string - - name: tarball_write_mode - type: string - required: false - - name: working_dir - type: path_string_null - required: false - - upload_data_frame_artifact: - plugin: dioptra_custom.vc.artifacts_mlflow.upload_data_frame_artifact - inputs: - - data_frame: dataframe - - file_name: string - - file_format: string - - name: file_format_kwargs - type: kwargs_null - required: false - - name: working_dir - type: path_string_null - required: false - get_uri_for_artifact: - plugin: dioptra_custom.vc.artifacts_restapi.get_uri_for_artifact - inputs: - - job_id: string - - name: index - type: integer - required: false - outputs: - ret: string - get_none: - plugin: dioptra_custom.vc.tensorflow.get_none - inputs: - - arg: string - outputs: - ret: "null" - process_float: - plugin: dioptra_custom.vc.tensorflow.process_float - inputs: - - arg: string - outputs: 
- ret: number - process_bool: - plugin: dioptra_custom.vc.tensorflow.process_bool - inputs: - - arg: string - outputs: - ret: boolean - process_int: - plugin: dioptra_custom.vc.tensorflow.process_int - inputs: - - arg: string - outputs: - ret: integer - process_int_list: - plugin: dioptra_custom.vc.tensorflow.process_int_list - inputs: - - arg: string - outputs: - ret: image_size - process_float_list: - plugin: dioptra_custom.vc.tensorflow.process_float_list - inputs: - - arg: string - outputs: - ret: image_size - -graph: - image_size: - process_int_list: $image_size_p - - clip_values: - process_float_list: $clip_values_p - - batch_size: - process_int: $batch_size_p - - eps: - process_float: $eps_p - - eps_step: - process_float: $eps_step_p - - minimal: - process_bool: $minimal_p - - true_none: - get_none: 'None' - - true_false: - process_bool: 'False' - - seed: - process_int: $seed_p - - init_rng: - init_rng: $seed - - global_seed: - draw_random_integer: - rng: $init_rng.ret2 - - dataset_seed: - draw_random_integer: - rng: $init_rng.ret2 - - init_tensorflow_results: - init_tensorflow: $global_seed - - make_directories_results: - make_directories: [[$adv_data_dir]] - - log_mlflow_params_result: - log_parameters: - - entry_point_seed: $seed - tensorflow_global_seed: $global_seed - dataset_seed: $dataset_seed - - artifact_uri: - get_uri_for_artifact: - job_id: $training_job_id - - keras_classifier: - load_wrapped_tensorflow_keras_classifier: - artifact_uri: $artifact_uri - classifier_kwargs: - clip_values: $clip_values - dependencies: init_tensorflow_results - - distance_metrics: - get_distance_metric_list: - - - name: l_infinity_norm - func: l_inf_norm - - name: l_1_norm - func: l_1_norm - - name: l_2_norm - func: l_2_norm - - name: cosine_similarity - func: paired_cosine_similarities - - name: euclidean_distance - func: paired_euclidean_distances - - name: manhattan_distance - func: paired_manhattan_distances - - name: wasserstein_distance - func: paired_wasserstein_distances - - dataset: - create_adversarial_fgm_dataset: - data_dir: $data_dir - keras_classifier: $keras_classifier - distance_metrics_list: $distance_metrics - adv_data_dir: $adv_data_dir - batch_size: $batch_size - image_size: $image_size - eps: $eps - eps_step: $eps_step - minimal: $minimal - norm: $norm - dependencies: make_directories_results - - upload_directory: - upload_directory_as_tarball_artifact: - - $adv_data_dir - - $adv_tar_name - dependencies: dataset - - upload_dataset: - upload_data_frame_artifact: - data_frame: $dataset - file_name: $distance_metrics_filename - file_format: csv.gz - file_format_kwargs: - index: $true_false diff --git a/examples/v1-client-tensorflow-mnist-classifier/src/infer.yml b/examples/v1-client-tensorflow-mnist-classifier/src/infer.yml deleted file mode 100644 index 1bece3443..000000000 --- a/examples/v1-client-tensorflow-mnist-classifier/src/infer.yml +++ /dev/null @@ -1,255 +0,0 @@ -types: - rng: - path: - sequential: - number_null: - union: [number, "null"] - string_null: - union: [string, "null"] - path_string: - union: [string, path] - path_string_null: - union: [path_string, "null"] - dirs: - list: path_string - directory_iterator: - parameters: - mapping: [string, number] - kwargs: - mapping: [string, any] - kwargs_null: - union: [kwargs, "null"] - keras_classifier: - eval_metric: - mapping: [string, any] - eval_metric_results: - mapping: [string, number] - dataframe: - image_size: - tuple: [integer, integer, integer] - norm: - union: [integer, number, string] - -parameters: - 
run_id: "" - image_size_p: [28, 28, 1] - training_job_id: - fgm_job_id: - adv_tar_name: testing_adversarial_fgm.tar.gz - adv_data_dir: adv_testing - seed_p: -1 - -tasks: - init_rng: - plugin: dioptra_custom.vc.random_rng.init_rng - inputs: - - name: seed - type: integer - required: false - outputs: - - ret1: integer - - ret2: rng - - draw_random_integer: - plugin: dioptra_custom.vc.random_sample.draw_random_integer - inputs: - - rng: rng - - name: low - type: integer - required: false - - name: high - type: integer - required: false - outputs: - value: integer - - init_tensorflow: - plugin: dioptra_custom.vc.backend_configs_tensorflow.init_tensorflow - inputs: - - seed: integer - - log_parameters: - plugin: dioptra_custom.vc.tracking_mlflow.log_parameters - inputs: - - parameters: parameters - - download_all_artifacts_for_job: - plugin: dioptra_custom.vc.artifacts_mlflow.download_all_artifacts_for_job - inputs: - - job_id: string - - artifact_path: string - - name: destination_path - type: string_null - required: false - outputs: - download_path: string - - extract_tarfile: - plugin: dioptra_custom.vc.artifacts_utils.extract_tarfile - inputs: - - filepath: path_string - - name: tarball_read_mode - type: string - required: false - - name: output_dir - type: any - required: false - - create_image_dataset: - plugin: dioptra_custom.vc.data_tensorflow.create_image_dataset - inputs: - - data_dir: string - - subset: string_null - - image_size: image_size - - seed: integer - - name: rescale - type: number - required: false - - name: validation_split - type: number_null - required: false - - name: batch_size - type: integer - required: false - - name: label_mode - type: string - required: false - outputs: - iterator: directory_iterator - - load_tensorflow_keras_classifier: - plugin: dioptra_custom.vc.registry_mlflow.load_tensorflow_keras_classifier - inputs: - - uri: string - outputs: - classifier: sequential - get_uri_for_artifact: - plugin: dioptra_custom.vc.artifacts_restapi.get_uri_for_artifact - inputs: - - job_id: string - - name: index - type: integer - required: false - outputs: - ret: string - - evaluate_metrics_tensorflow: - plugin: dioptra_custom.vc.tensorflow.evaluate_metrics_tensorflow - inputs: - - classifier: any - - dataset: any - outputs: - metrics: eval_metric_results - log_metrics: - plugin: dioptra_custom.vc.tracking_mlflow.log_metrics - inputs: - - metrics: eval_metric_results - get_none: - plugin: dioptra_custom.vc.tensorflow.get_none - inputs: - - arg: string - outputs: - ret: "null" - process_float: - plugin: dioptra_custom.vc.tensorflow.process_float - inputs: - - arg: string - outputs: - ret: number - process_bool: - plugin: dioptra_custom.vc.tensorflow.process_bool - inputs: - - arg: string - outputs: - ret: boolean - process_int: - plugin: dioptra_custom.vc.tensorflow.process_int - inputs: - - arg: string - outputs: - ret: integer - process_int_list: - plugin: dioptra_custom.vc.tensorflow.process_int_list - inputs: - - arg: string - outputs: - ret: image_size - process_float_list: - plugin: dioptra_custom.vc.tensorflow.process_float_list - inputs: - - arg: string - outputs: - ret: image_size - - -graph: - - image_size: - process_int_list: $image_size_p - - seed: - process_int: $seed_p - - init_rng: - init_rng: $seed - - tensorflow_global_seed: - draw_random_integer: - rng: $init_rng.ret2 - - dataset_seed: - draw_random_integer: - rng: $init_rng.ret2 - - init_tensorflow_results: - init_tensorflow: $tensorflow_global_seed - - log_mlflow_params_result: - log_parameters: - - 
entry_point_seed: $seed - tensorflow_global_seed: $tensorflow_global_seed - dataset_seed: $dataset_seed - - adv_tar_path: - download_all_artifacts_for_job: - job_id: $fgm_job_id - artifact_path: $adv_tar_name - - extract_tarfile_results: - extract_tarfile: - filepath: $adv_tar_path - - adv_ds: - create_image_dataset: - data_dir: $adv_data_dir - subset: null - validation_split: null - image_size: $image_size - seed: $dataset_seed - dependencies: - - init_tensorflow_results - - extract_tarfile_results - - model_uri: - get_uri_for_artifact: - job_id: $training_job_id - - classifier: - load_tensorflow_keras_classifier: - uri: $model_uri - dependencies: - - init_tensorflow_results - - adv_ds - - classifier_performance_metrics: - evaluate_metrics_tensorflow: - classifier: $classifier - dataset: $adv_ds - dependencies: - - classifier - - logged_metrics: - log_metrics: - metrics: $classifier_performance_metrics - dependencies: - - classifier_performance_metrics diff --git a/examples/v1-client-tensorflow-mnist-classifier/src/train.yml b/examples/v1-client-tensorflow-mnist-classifier/src/train.yml deleted file mode 100644 index a03403d9f..000000000 --- a/examples/v1-client-tensorflow-mnist-classifier/src/train.yml +++ /dev/null @@ -1,371 +0,0 @@ -types: - rng: - optimizer: - name_parameters: - mapping: - name: string - parameters: - mapping: [string, any] - metrics_list: - list: - mapping: - name: string - parameters: - mapping: [string, any] - performance_metrics: - metrics: - callbacks_in: - list: - mapping: - name: string - parameters: - mapping: [string, any] - callbacks_out: - mapping: - name: string - parameters: - mapping: [string, any] - directory_iterator: - parameters: - mapping: [string, number] - image_size: - tuple: [integer, integer, integer] - sequential: - fit_kwargs: - mapping: [string, any] - fit_kwargs_null: - union: - - mapping: [string, any] - - "null" - str_null: - union: [string, "null"] - num_null: - union: [number, "null"] - -parameters: - seed_p: -1 - optimizer_name: Adam - learning_rate_p: 0.001 - training_dir: /dioptra/data/Mnist/training - testing_dir: /dioptra/data/Mnist/testing - image_size_p: [28, 28, 1] - validation_split_p: 0.2 - batch_size_p: 32 - model_architecture: le_net - epochs_p: 30 - register_model_name: "mnist_classifier" - -tasks: - init_rng: - plugin: dioptra_custom.vc.random_rng.init_rng - inputs: - - name: seed - type: integer - required: false - outputs: - - ret1: integer - - ret2: rng - - draw_random_integer: - plugin: dioptra_custom.vc.random_sample.draw_random_integer - inputs: - - rng: rng - - name: low - type: integer - required: false - - name: high - type: integer - required: false - outputs: - value: integer - - init_tensorflow: - plugin: dioptra_custom.vc.backend_configs_tensorflow.init_tensorflow - inputs: - - seed: integer - - log_parameters: - plugin: dioptra_custom.vc.tracking_mlflow.log_parameters - inputs: - - parameters: parameters - - get_optimizer: - plugin: dioptra_custom.vc.tensorflow.get_optimizer - inputs: - - name: optimizer - type: string - - learning_rate: number - outputs: - optimizer: optimizer - - get_performance_metrics: - plugin: dioptra_custom.vc.tensorflow.get_performance_metrics - inputs: - - metrics_list: metrics_list - outputs: - performance_metrics: performance_metrics - - get_model_callbacks: - plugin: dioptra_custom.vc.tensorflow.get_model_callbacks - inputs: - - callbacks_list: callbacks_in - outputs: - callbacks: callbacks_out - - create_image_dataset: - plugin: 
dioptra_custom.vc.data_tensorflow.create_image_dataset - inputs: - - data_dir: string - - subset: str_null - - image_size: image_size - - seed: integer - - name: rescale - type: number - required: false - - name: validation_split - type: num_null - required: false - - name: batch_size - type: integer - required: false - - name: label_mode - type: string - required: false - outputs: - dataset: directory_iterator - - get_n_classes_from_directory_iterator: - plugin: dioptra_custom.vc.data_tensorflow.get_n_classes_from_directory_iterator - inputs: - - ds: directory_iterator - outputs: - num_classes: integer - - init_classifier: - plugin: dioptra_custom.vc.estimators_keras_classifiers.init_classifier - inputs: - - model_architecture: string - - optimizer: optimizer - - metrics: performance_metrics - - input_shape: image_size - - n_classes: integer - - name: loss - type: string - required: false - outputs: - classifier: sequential - - fit: - plugin: dioptra_custom.vc.estimators_methods.fit - inputs: - - estimator: any - - x: any - - name: y - type: any - required: false - - name: fit_kwargs - type: fit_kwargs_null - required: false - - evaluate_metrics_tensorflow: - plugin: dioptra_custom.vc.tensorflow.evaluate_metrics_tensorflow - inputs: - - classifier: sequential - - dataset: directory_iterator - outputs: - metrics: metrics - - log_metrics: - plugin: dioptra_custom.vc.tracking_mlflow.log_metrics - inputs: - - metrics: metrics - - log_tensorflow_keras_estimator: - plugin: dioptra_custom.vc.tracking_mlflow.log_tensorflow_keras_estimator - inputs: - - estimator: sequential - - model_dir: string - - add_model_to_registry: - plugin: dioptra_custom.vc.mlflow.add_model_to_registry - inputs: - - name: name - type: string - - model_dir: string - get_none: - plugin: dioptra_custom.vc.tensorflow.get_none - inputs: - - arg: string - outputs: - ret: "null" - process_float: - plugin: dioptra_custom.vc.tensorflow.process_float - inputs: - - arg: string - outputs: - ret: number - process_int: - plugin: dioptra_custom.vc.tensorflow.process_int - inputs: - - arg: string - outputs: - ret: integer - process_int_list: - plugin: dioptra_custom.vc.tensorflow.process_int_list - inputs: - - arg: string - outputs: - ret: image_size - -graph: - batch_size: - process_int: $batch_size_p - - epochs: - process_int: $epochs_p - - validation_split: - process_float: $validation_split_p - - true_none: - get_none: 'h' - - image_size: - process_int_list: $image_size_p - - learning_rate: - process_float: $learning_rate_p - - seed: - process_int: $seed_p - - init_rng: - init_rng: $seed - - global_seed: - draw_random_integer: - rng: $init_rng.ret2 - - dataset_seed: - draw_random_integer: - rng: $init_rng.ret2 - - init_tensorflow: - init_tensorflow: $global_seed - - log_params: - log_parameters: - - entry_point_seed: $init_rng.ret1 - tensorflow_global_seed: $global_seed - dataset_seed: $dataset_seed - - optimizer: - get_optimizer: - optimizer: $optimizer_name - learning_rate: $learning_rate - dependencies: - - init_tensorflow - - perf_metrics: - get_performance_metrics: - - - name: CategoricalAccuracy - parameters: { name: accuracy } - - name: Precision - parameters: { name: precision } - - name: Recall - parameters: { name: recall } - - name: AUC - parameters: { name: auc } - dependencies: - - init_tensorflow - - callbacks: - get_model_callbacks: - - - name: EarlyStopping - parameters: - monitor: val_loss - min_delta: .01 - patience: 5 - restore_best_weights: true - dependencies: - - init_tensorflow - - training_dataset: - 
create_image_dataset: - data_dir: $training_dir - subset: training - image_size: $image_size - seed: $dataset_seed - validation_split: $validation_split - batch_size: $batch_size - dependencies: - - init_tensorflow - - validation_dataset: - create_image_dataset: - data_dir: $training_dir - subset: validation - image_size: $image_size - seed: $dataset_seed - validation_split: $validation_split - batch_size: $batch_size - dependencies: - - init_tensorflow - - testing_dataset: - create_image_dataset: - data_dir: $testing_dir - subset: null - image_size: $image_size - seed: $dataset_seed - validation_split: null - batch_size: $batch_size - dependencies: - - init_tensorflow - - num_classes: - get_n_classes_from_directory_iterator: $training_dataset - - classifier: - init_classifier: - model_architecture: $model_architecture - optimizer: $optimizer - metrics: $perf_metrics - input_shape: $image_size - n_classes: $num_classes - dependencies: - - init_tensorflow - - model: - fit: - estimator: $classifier - x: $training_dataset - fit_kwargs: - nb_epochs: $epochs - validation_data: $validation_dataset - callbacks: $callbacks - verbose: 2 - - eval_metrics_tensorflow: - evaluate_metrics_tensorflow: - - $classifier - - $testing_dataset - dependencies: - - model - - log_metrics: - log_metrics: $eval_metrics_tensorflow - - log_keras_estimator: - log_tensorflow_keras_estimator: - - $classifier - - model - dependencies: - - model - - add_model_to_registry: - add_model_to_registry: - - $register_model_name - - model - dependencies: - - log_keras_estimator From ec2b04b06e081b27631541ed9b7e1c36c0a156d6 Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Wed, 18 Sep 2024 13:06:16 -0400 Subject: [PATCH 03/18] examples: remove more unneeded files --- examples/scripts/register_queues.py | 101 --------- examples/scripts/register_task_plugins.py | 212 ------------------ .../defenses_image_preprocessing.py | 15 +- .../feature_squeezing/cw_inf_plugin.py | 2 +- .../feature_squeezing/cw_l2_plugin.py | 2 +- .../feature_squeezing/jsma_plugin.py | 2 +- .../feature_squeezing/squeeze_plugin.py | 2 +- 7 files changed, 18 insertions(+), 318 deletions(-) delete mode 100644 examples/scripts/register_queues.py delete mode 100644 examples/scripts/register_task_plugins.py diff --git a/examples/scripts/register_queues.py b/examples/scripts/register_queues.py deleted file mode 100644 index fbbf4f949..000000000 --- a/examples/scripts/register_queues.py +++ /dev/null @@ -1,101 +0,0 @@ -#!/usr/bin/env python -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). 
The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Register the queues used in Dioptra's examples and demos. - -Functions: - register_queues: The Click command for registering the queues used in Dioptra's - examples and demos. -""" -from __future__ import annotations - -import click -from rich.console import Console - -# The try/except ImportError blocks allow this script to be invoked using: -# python ./scripts/register_task_plugins.py # OR -# python -m scripts.register_task_plugins -try: - from .client import DioptraClient - -except ImportError: - from client import DioptraClient - -try: - from .utils import RichConsole - -except ImportError: - from utils import RichConsole - -_CONTEXT_SETTINGS = dict( - help_option_names=["-h", "--help"], - show_default=True, -) - - -@click.command(context_settings=_CONTEXT_SETTINGS) -@click.option( - "--queue", - multiple=True, - type=click.STRING, - default=["tensorflow_cpu", "tensorflow_gpu", "pytorch_cpu", "pytorch_gpu"], - help="The queue name to register.", -) -@click.option( - "--api-url", - type=click.STRING, - default="http://localhost", - help="The url to the Dioptra REST API.", -) -def register_queues(queue, api_url): - """Register the queues used in Dioptra's examples and demos.""" - - console = RichConsole(Console()) - client = DioptraClient(address=api_url) - - console.print_title("Dioptra Examples - Register Queues") - console.print_parameter("queue", value=f"[default not bold]{', '.join(queue)}[/]") - console.print_parameter("api_url", value=f"[default not bold]{api_url}[/]") - - for name in queue: - response = client.get_queue_by_name(name=name) - - if response is None or "Not Found" in response.get("message", []): - response = client.register_queue(name=name) - response_after = client.get_queue_by_name(name=name) - - if response_after is None or "Not Found" in response_after.get("message", []): - raise RuntimeError( - f"Failed to register the queue {name!r}. Is the API URL correct?" - ) - - console.print_success( - "[bold green]Success![/] [default not bold]Registered the queue " - f"{name!r}.[/]" - ) - - else: - console.print_info( - f"[bold white]Skipped.[/] [default not bold]The queue {name!r} is " - "already registered.[/]" - ) - - console.print_success("[default no bold]Queue registration is complete.[/]") - - -if __name__ == "__main__": - register_queues() diff --git a/examples/scripts/register_task_plugins.py b/examples/scripts/register_task_plugins.py deleted file mode 100644 index 52a319b77..000000000 --- a/examples/scripts/register_task_plugins.py +++ /dev/null @@ -1,212 +0,0 @@ -#!/usr/bin/env python -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Register the custom task plugins used in Dioptra's examples and demos. - -Classes: - CustomTaskPlugin: A dictionary containing the name and path to the tarball for - each custom task plugin package. - -Functions: - make_custom_plugins: Create a tarball for each custom task plugin package under a - directory. - upload_custom_plugin_package: Upload a custom task plugin package via the Dioptra - REST API. - delete_custom_plugin_package: Delete a custom task plugin package via the Dioptra - REST API. - register_task_plugins: The Click command for registering the custom task plugins - used in Dioptra's examples and demos. -""" -from __future__ import annotations - -from pathlib import Path -from tempfile import TemporaryDirectory -from typing import Iterable, TypedDict - -import click -from rich.console import Console - -# The try/except ImportError blocks allow this script to be invoked using: -# python ./scripts/register_task_plugins.py # OR -# python -m scripts.register_task_plugins -try: - from .client import DioptraClient - -except ImportError: - from client import DioptraClient - -try: - from .utils import RichConsole, make_tar - -except ImportError: - from utils import RichConsole, make_tar - -_CONTEXT_SETTINGS = dict( - help_option_names=["-h", "--help"], - show_default=True, -) - - -class CustomTaskPlugin(TypedDict): - name: str - path: Path - - -def make_custom_plugins( - plugins_dir: Path, output_dir: Path -) -> Iterable[CustomTaskPlugin]: - """Create a tarball for each custom task plugin package under a directory. - - Args: - plugins_dir: The directory containing the custom task plugin subdirectories. - output_dir: The directory where the tarballs will be saved. - - Yields: - A dictionary containing the name and path to the tarball for each custom task - plugin package. - """ - plugin_packages: list[Path] = [x for x in plugins_dir.glob("*/*") if x.is_dir()] - - for plugin_package in plugin_packages: - plugin_name = plugin_package.name - plugin_path = make_tar( - source_dir=[plugin_package], - tarball_filename=f"custom-plugins-{plugin_name}.tar.gz", - working_dir=output_dir, - ) - yield CustomTaskPlugin(name=plugin_name, path=plugin_path) - - -def upload_custom_plugin_package( - client: DioptraClient, custom_plugin: CustomTaskPlugin -) -> None: - """Upload a custom task plugin package via the Dioptra REST API. - - Args: - client: The Dioptra REST API client. - custom_plugin: A dictionary containing the name and path to the tarball for - the custom task plugin package. - - Raises: - RuntimeError: If the custom task plugin package fails to upload. - """ - response = client.upload_custom_plugin_package( - custom_plugin_name=custom_plugin["name"], - custom_plugin_file=custom_plugin["path"], - ) - response_after = client.get_custom_task_plugin(name=custom_plugin["name"]) - - if response_after is None or "Not Found" in response_after.get("message", []): - raise RuntimeError( - "Failed to register the custom task plugin " - f"{custom_plugin['name']!r}. Is the API URL correct?" 
- ) - - -def delete_custom_plugin_package( - client: DioptraClient, custom_plugin: CustomTaskPlugin -) -> None: - """Delete a custom task plugin package via the Dioptra REST API. - - Args: - client: The Dioptra REST API client. - custom_plugin: A dictionary containing the name and path to the tarball for - the custom task plugin package. - - Raises: - RuntimeError: If the custom task plugin package fails to delete. - """ - response = client.delete_custom_task_plugin(custom_plugin["name"]) - - if response is None or "Success" not in response.get("status", []): - raise RuntimeError( - "Failed to delete the custom task plugin " - f"{custom_plugin['name']!r}. Is the API URL correct?" - ) - - -@click.command(context_settings=_CONTEXT_SETTINGS) -@click.option( - "--plugins-dir", - type=click.Path(exists=True, file_okay=False, dir_okay=True, path_type=Path), - default="./task-plugins", - help=( - "The path to the directory containing the custom task plugin subdirectories." - ), -) -@click.option( - "--api-url", - type=click.STRING, - default="http://localhost", - help="The url to the Dioptra REST API.", -) -@click.option( - "-f", - "--force", - type=click.BOOL, - is_flag=True, - show_default=True, - default=False, - help="Remove and re-register any existing custom task plugins.", -) -def register_task_plugins(plugins_dir, api_url, force): - """Register the custom task plugins used in Dioptra's examples and demos.""" - - console = RichConsole(Console()) - client = DioptraClient(address=api_url) - - console.print_title("Dioptra Examples - Register Custom Task Plugins") - console.print_parameter("plugins_dir", value=click.format_filename(plugins_dir)) - console.print_parameter("api_url", value=f"[default not bold]{api_url}[/]") - console.print_parameter("force", value=f"{force}") - - with TemporaryDirectory() as temp_dir: - custom_plugins = make_custom_plugins( - plugins_dir=plugins_dir, output_dir=Path(temp_dir) - ) - - for custom_plugin in custom_plugins: - response = client.get_custom_task_plugin(name=custom_plugin["name"]) - - if response is None or "Not Found" in response.get("message", []): - upload_custom_plugin_package(client=client, custom_plugin=custom_plugin) - console.print_success( - "[bold green]Success![/] [default not bold]Registered the custom " - f"task plugin {custom_plugin['name']!r}.[/]" - ) - - elif force: - delete_custom_plugin_package(client=client, custom_plugin=custom_plugin) - upload_custom_plugin_package(client=client, custom_plugin=custom_plugin) - console.print_success( - "[bold yellow]Overwritten.[/] [default not bold]Removed and " - f"re-registered the custom task plugin {custom_plugin['name']!r}.[/]" - ) - - else: - console.print_info( - "[bold white]Skipped.[/] [default not bold]The custom task plugin " - f"{custom_plugin['name']!r} is already registered.[/]" - ) - - console.print_success( - "[default no bold]Custom task plugin registration is complete.[/]" - ) - - -if __name__ == "__main__": - register_task_plugins() diff --git a/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py b/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py index d339167e1..dafd54a0b 100644 --- a/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py +++ b/examples/task-plugins/dioptra_custom/custom_poisoning_plugins/defenses_image_preprocessing.py @@ -66,17 +66,20 @@ @require_package("art", exc_type=ARTDependencyError) @require_package("tensorflow", 
exc_type=TensorflowDependencyError) def create_defended_dataset( - data_flow: Any, data_dir: str, def_data_dir: Union[str, Path], image_size: Tuple[int, int, int], distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, batch_size: int = 32, + label_mode: str = "categorical", def_type: str = "spatial_smoothing", defense_kwargs: Optional[Dict[str, Any]] = None, ) -> pd.DataFrame: distance_metrics_list = distance_metrics_list or [] + color_mode: str = "rgb" if image_size[2] == 3 else "grayscale" + rescale: float = 1.0 if image_size[2] == 3 else 1.0 / 255 clip_values: Tuple[float, float] = (0, 255) if image_size[2] == 3 else (0, 1.0) + target_size: Tuple[int, int] = image_size[:2] def_data_dir = Path(def_data_dir) defense = _init_defense( @@ -85,6 +88,16 @@ def create_defended_dataset( defense_kwargs=defense_kwargs, ) + data_generator: ImageDataGenerator = ImageDataGenerator(rescale=rescale) + + data_flow = data_generator.flow_from_directory( + directory=data_dir, + target_size=target_size, + color_mode=color_mode, + class_mode=label_mode, + batch_size=batch_size, + shuffle=False, + ) num_images = data_flow.n img_filenames = [Path(x) for x in data_flow.filenames] class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get) diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py index 923d10355..5697aa722 100644 --- a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py +++ b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_inf_plugin.py @@ -97,7 +97,7 @@ def create_adversarial_cw_inf_dataset( color_mode=color_mode, class_mode=label_mode, batch_size=batch_size, - shuffle=True, # alse, + shuffle=True, # false, ) num_images = data_flow.n img_filenames = [Path(x) for x in data_flow.filenames] diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py index d8a2d9a3a..a8986d439 100644 --- a/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py +++ b/examples/task-plugins/dioptra_custom/feature_squeezing/cw_l2_plugin.py @@ -102,7 +102,7 @@ def create_adversarial_cw_l2_dataset( color_mode=color_mode, class_mode=label_mode, batch_size=batch_size, - shuffle=True, # alse, + shuffle=True, # false, ) num_images = data_flow.n img_filenames = [Path(x) for x in data_flow.filenames] diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py index bd8ab8709..bf9940943 100644 --- a/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py +++ b/examples/task-plugins/dioptra_custom/feature_squeezing/jsma_plugin.py @@ -93,7 +93,7 @@ def create_adversarial_jsma_dataset( color_mode=color_mode, class_mode=label_mode, batch_size=batch_size, - shuffle=True, # alse, + shuffle=True, # false, ) num_images = data_flow.n img_filenames = [Path(x) for x in data_flow.filenames] diff --git a/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py b/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py index 1b0b11536..f14e83b68 100644 --- a/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py +++ b/examples/task-plugins/dioptra_custom/feature_squeezing/squeeze_plugin.py @@ -84,7 +84,7 @@ def feature_squeeze( run_id=run_id, ) - batch_size = 32 # There is currently a bug 
preventing batch size from getting passsed in correctly + batch_size = 32 # There is currently a bug preventing batch size from getting passed in correctly tensorflow_global_seed: int = rng.integers(low=0, high=2**31 - 1) dataset_seed: int = rng.integers(low=0, high=2**31 - 1) From 81ac8b2b661f78c756d400e14a996ab5666ef108 Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Wed, 18 Sep 2024 13:17:07 -0400 Subject: [PATCH 04/18] examples: clean up notebook --- examples/mnist-classifier-demo/demo.ipynb | 52 +++++++++++++++++++++++ 1 file changed, 52 insertions(+) diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index 8104d0c83..06d1adfec 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -514,6 +514,58 @@ "infer_jpeg = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job, defense=True)" ] }, + { + "cell_type": "code", + "execution_count": 156, + "metadata": { + "scrolled": true + }, + "outputs": [ + { + "data": { + "text/plain": [ + "'Job finished. Starting defense job.'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "job_time_limit = '1h'\n", + "wait_for_job(fgm_job, 'defense')\n", + "jpeg_comp_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id,\n", + " f\"defense job for {experiment_id}\",\n", + " queue_id,\n", + " defense_ep,\n", + " {\n", + " \"job_id\": str(fgm_job['id']),\n", + " \"def_type\":\"gaussian_augmentation\"\n", + " }, # -1 means get the latest\n", + " job_time_limit\n", + ")" + ] + }, + { + "cell_type": "code", + "execution_count": 145, + "metadata": {}, + "outputs": [ + { + "data": { + "text/plain": [ + "'Job finished. 
Starting infer job.'" + ] + }, + "metadata": {}, + "output_type": "display_data" + } + ], + "source": [ + "infer_jpeg = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job, defense=True)" + ] + }, { "cell_type": "code", "execution_count": 155, From 91c5b4ea715b32865dd506fb52a5249e6aa0485f Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Wed, 18 Sep 2024 14:24:31 -0400 Subject: [PATCH 05/18] examples: add gaussian defense and ability to use mappings to client --- examples/mnist-classifier-demo/demo.ipynb | 204 +++++------------- .../mnist-classifier-demo/src/defense.yml | 2 + examples/scripts/setup.py | 2 +- 3 files changed, 58 insertions(+), 150 deletions(-) diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index 06d1adfec..1f62c6d78 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -31,7 +31,7 @@ }, { "cell_type": "code", - "execution_count": 80, + "execution_count": null, "metadata": { "tags": [] }, @@ -53,12 +53,13 @@ }, { "cell_type": "code", - "execution_count": 81, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ "# Import packages from the Python standard library\n", "import importlib.util\n", + "import json\n", "import os\n", "import sys\n", "import pprint\n", @@ -176,7 +177,7 @@ }, { "cell_type": "code", - "execution_count": 131, + "execution_count": null, "metadata": { "tags": [] }, @@ -194,28 +195,9 @@ }, { "cell_type": "code", - "execution_count": 132, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "\u001b[2m2024-09-18 12:52:25\u001b[0m [\u001b[31m\u001b[1merror \u001b[0m] \u001b[1mError code 400 returned. \u001b[0m \u001b[36mdata\u001b[0m=\u001b[35m{'username': 'pluginuser', 'email': 'pluginuser@dioptra.nccoe.nist.gov', 'password': 'pleasemakesuretoPLUGINthecomputer', 'confirmPassword': 'pleasemakesuretoPLUGINthecomputer'}\u001b[0m \u001b[36mmethod\u001b[0m=\u001b[35mPOST\u001b[0m \u001b[36mresponse\u001b[0m=\u001b[35m{\"message\": \"Bad Request - The username on the registration form is not available. Please select another and resubmit.\"}\n", - "\u001b[0m \u001b[36murl\u001b[0m=\u001b[35mhttp://localhost:20080/api/v1/users/\u001b[0m\n" - ] - }, - { - "data": { - "text/plain": [ - "{'username': 'pluginuser', 'status': 'Login successful'}" - ] - }, - "execution_count": 132, - "metadata": {}, - "output_type": "execute_result" - } - ], + "outputs": [], "source": [ "try:\n", " client.users.create('pluginuser','pluginuser@dioptra.nccoe.nist.gov','pleasemakesuretoPLUGINthecomputer','pleasemakesuretoPLUGINthecomputer')\n", @@ -233,7 +215,7 @@ }, { "cell_type": "code", - "execution_count": 154, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -261,7 +243,7 @@ }, { "cell_type": "code", - "execution_count": 134, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -283,7 +265,7 @@ }, { "cell_type": "code", - "execution_count": 135, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -312,19 +294,9 @@ }, { "cell_type": "code", - "execution_count": 137, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. 
Starting fgm job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "job_time_limit = '1h'\n", "\n", @@ -350,7 +322,7 @@ }, { "cell_type": "code", - "execution_count": 149, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -366,7 +338,7 @@ " {\"job_id\": str(prev_job['id']),\n", " \"tar_name\": tn,\n", " \"data_dir\": dd,\n", - " \"model_name\": MODEL_NAME, \"model_version\": str(-1)},\n", + " \"model_name\": MODEL_NAME, \"model_version\": str(-1)}, # -1 means get the latest\n", " job_time_limit\n", " )\n", " return infer_job" @@ -374,7 +346,7 @@ }, { "cell_type": "code", - "execution_count": 150, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -391,26 +363,16 @@ }, { "cell_type": "code", - "execution_count": 140, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. Starting infer job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "infer_fgm = infer(experiment_id, queue_id, infer_ep, fgm_job, defense=False)" ] }, { "cell_type": "code", - "execution_count": 141, + "execution_count": null, "metadata": {}, "outputs": [], "source": [ @@ -419,19 +381,11 @@ }, { "cell_type": "code", - "execution_count": 142, - "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. Starting defense job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], "source": [ "job_time_limit = '1h'\n", "wait_for_job(fgm_job, 'defense')\n", @@ -440,45 +394,29 @@ " f\"defense job for {experiment_id}\",\n", " queue_id,\n", " defense_ep,\n", - " {\"job_id\": str(fgm_job['id']),\"def_type\":\"spatial_smoothing\"}, # -1 means get the latest\n", + " {\n", + " \"job_id\": str(fgm_job['id']),\n", + " \"def_type\":\"spatial_smoothing\",\n", + " \"defense_kwargs\": json.dumps({})\n", + " }, \n", " job_time_limit\n", ")" ] }, { "cell_type": "code", - "execution_count": 143, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. Starting infer job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "infer_spatial = infer(experiment_id, queue_id, infer_ep, spatial_job, defense=True)" ] }, { "cell_type": "code", - "execution_count": 144, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. Starting defense job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "job_time_limit = '1h'\n", "wait_for_job(fgm_job, 'defense')\n", @@ -489,104 +427,72 @@ " defense_ep,\n", " {\n", " \"job_id\": str(fgm_job['id']),\n", - " \"def_type\":\"jpeg_compression\"\n", - " }, # -1 means get the latest\n", + " \"def_type\":\"jpeg_compression\",\n", + " \"defense_kwargs\": json.dumps({})\n", + " },\n", " job_time_limit\n", ")" ] }, { "cell_type": "code", - "execution_count": 145, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. 
Starting infer job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "infer_jpeg = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job, defense=True)" ] }, { "cell_type": "code", - "execution_count": 156, + "execution_count": null, "metadata": { "scrolled": true }, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. Starting defense job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ "job_time_limit = '1h'\n", "wait_for_job(fgm_job, 'defense')\n", - "jpeg_comp_job = client.experiments.create_jobs_by_experiment_id(\n", + "gaussian_job = client.experiments.create_jobs_by_experiment_id(\n", " experiment_id,\n", " f\"defense job for {experiment_id}\",\n", " queue_id,\n", " defense_ep,\n", " {\n", " \"job_id\": str(fgm_job['id']),\n", - " \"def_type\":\"gaussian_augmentation\"\n", - " }, # -1 means get the latest\n", + " \"def_type\":\"gaussian_augmentation\",\n", + " \"defense_kwargs\": json.dumps({\n", + " \"augmentation\": False,\n", + " \"ratio\": 1,\n", + " \"sigma\": 1,\n", + " \"apply_fit\": False,\n", + " \"apply_predict\": True\n", + " })\n", + " }, \n", " job_time_limit\n", ")" ] }, { "cell_type": "code", - "execution_count": 145, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "data": { - "text/plain": [ - "'Job finished. Starting infer job.'" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], + "outputs": [], "source": [ - "infer_jpeg = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job, defense=True)" + "infer_gaussian = infer(experiment_id, queue_id, infer_ep, gaussian_job, defense=True)" ] }, { "cell_type": "code", - "execution_count": 155, + "execution_count": null, "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "{'training_time_in_minutes': 0.32976753333333336, 'accuracy': 0.9775166511535645, 'auc': 0.9987682700157166, 'loss': 0.07407279312610626, 'precision': 0.9809511303901672, 'recall': 0.9750000238418579}\n", - "{'accuracy': 0.11217948794364929, 'auc': 0.6169368028640747, 'precision': 0.09878776967525482, 'loss': 3.25475811958313, 'recall': 0.0546875}\n", - "{'accuracy': 0.11548477411270142, 'auc': 0.6298573613166809, 'loss': 3.010637044906616, 'precision': 0.10013880580663681, 'recall': 0.05058092996478081}\n", - "{'auc': 0.617414653301239, 'precision': 0.12656284868717194, 'accuracy': 0.1341145783662796, 'loss': 2.9532642364501953, 'recall': 0.05779246613383293}\n" - ] - } - ], + "outputs": [], "source": [ "print(get_metrics(training_job))\n", "print(get_metrics(infer_fgm))\n", "print(get_metrics(infer_jpeg))\n", - "print(get_metrics(infer_spatial))" + "print(get_metrics(infer_spatial))\n", + "print(get_metrics(infer_gaussian))" ] } ], diff --git a/examples/mnist-classifier-demo/src/defense.yml b/examples/mnist-classifier-demo/src/defense.yml index 25aa93fdf..6ca88e029 100644 --- a/examples/mnist-classifier-demo/src/defense.yml +++ b/examples/mnist-classifier-demo/src/defense.yml @@ -58,6 +58,7 @@ parameters: norm: "inf" seed: -1 def_type: spatial_smoothing + defense_kwargs: {} tasks: load_artifacts_for_job: @@ -182,6 +183,7 @@ graph: image_size: $image_size batch_size: $batch_size def_type: $def_type + defense_kwargs: $defense_kwargs distance_metrics: - name: l_infinity_norm func: l_inf_norm diff --git a/examples/scripts/setup.py b/examples/scripts/setup.py index c3e4ad1fd..523e5e760 100644 --- a/examples/scripts/setup.py +++ b/examples/scripts/setup.py @@ -161,7 
+161,7 @@ def register_plugins(client, group, plugins_to_upload): return list(set(plugins)) def create_parameters_object(client, params): ret = [] - type_map = {'int': 'integer', 'float':'float', 'string':'string', 'list':'list', 'bool': 'boolean'} + type_map = {'int': 'integer', 'float':'float', 'string':'string', 'list':'list', 'bool': 'boolean', 'dict': 'mapping'} for p in params: if (type(params[p]).__name__ in type_map.keys()): paramType = type_map[type(params[p]).__name__] From 4a8327a1954717eb121e7753ee87d822295c086f Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Thu, 19 Sep 2024 10:08:26 -0400 Subject: [PATCH 06/18] examples: add graph at end of demo --- examples/mnist-classifier-demo/demo.ipynb | 68 +++++++++++++++++++++-- 1 file changed, 63 insertions(+), 5 deletions(-) diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index 1f62c6d78..2301cf4b4 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -488,11 +488,69 @@ "metadata": {}, "outputs": [], "source": [ - "print(get_metrics(training_job))\n", - "print(get_metrics(infer_fgm))\n", - "print(get_metrics(infer_jpeg))\n", - "print(get_metrics(infer_spatial))\n", - "print(get_metrics(infer_gaussian))" + "import pprint\n", + "\n", + "metrics = {\n", + " \"trained\": get_metrics(training_job),\n", + " \"fgm\": get_metrics(infer_fgm),\n", + " \"jpeg\": get_metrics(infer_jpeg),\n", + " \"spatial\": get_metrics(infer_spatial),\n", + " \"gaussian\": get_metrics(infer_gaussian)\n", + "}\n", + "\n", + "pp = pprint.PrettyPrinter(depth=4)\n", + "pp.pprint(metrics)" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "import numpy as np\n", + "import matplotlib.pyplot as plt \n", + "\n", + "scenarios = [\n", + " 'Base Model',\n", + " 'Fast Gradient Method (Attack)',\n", + " 'JPEG Compression (Defense)',\n", + " 'Spatial Smoothing (Defense)',\n", + " 'Gaussian Noise (Defense)'\n", + "]\n", + "values = [\n", + " metrics['trained']['accuracy'] * 100,\n", + " metrics['fgm']['accuracy'] * 100,\n", + " metrics['jpeg']['accuracy'] * 100,\n", + " metrics['spatial']['accuracy'] * 100,\n", + " metrics['gaussian']['accuracy'] * 100,\n", + "]\n", + "\n", + "fig, ax = plt.subplots(figsize =(16, 9))\n", + "\n", + "# Horizontal Bar Plot\n", + "ax.barh(scenarios, values)\n", + "\n", + "# Add padding between axes and labels\n", + "ax.xaxis.set_tick_params(pad = 5)\n", + "ax.yaxis.set_tick_params(pad = 10)\n", + "\n", + "# Show top values \n", + "ax.invert_yaxis()\n", + "\n", + "# Add annotation to bars\n", + "for i in ax.patches:\n", + " plt.text(i.get_width()+0.2, i.get_y()+0.5, \n", + " str(round((i.get_width()), 2)),\n", + " fontsize = 10, fontweight ='bold',\n", + " color ='grey')\n", + "\n", + "# Add Plot Title\n", + "ax.set_title('Inference Percent Accuracy',\n", + " loc ='left', )\n", + "\n", + "# Show Plot\n", + "plt.show()" ] } ], From 9d761570cf7323dbb56a8435be4d3c9eab0e55cd Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Thu, 3 Oct 2024 14:16:30 -0400 Subject: [PATCH 07/18] examples: added unused patch generation (requires tensorflow downgrade) --- examples/mnist-classifier-demo/demo.ipynb | 13 +- examples/mnist-classifier-demo/src/fgm.yml | 23 +- .../unused/patch_gen.yml | 186 ++++++++++++ .../fgm_mnist_demo/attacks_fgm.py | 5 - .../fgm_mnist_demo/attacks_patch.py | 281 
++++++++++++++++++ .../dioptra_custom/fgm_mnist_demo/plugins.py | 81 ++++- .../fgm_mnist_demo/registry_art.py | 3 +- 7 files changed, 556 insertions(+), 36 deletions(-) create mode 100644 examples/mnist-classifier-demo/unused/patch_gen.yml create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index 2301cf4b4..b84b1e7a4 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -45,7 +45,7 @@ "MODEL_NAME = \"mnist_classifier\"\n", "\n", "# Default address for accessing the RESTful API service\n", - "RESTAPI_ADDRESS = \"http://localhost:20080\"\n", + "RESTAPI_ADDRESS = \"http://localhost:80\"\n", "\n", "# Default address for accessing the MLFlow Tracking server\n", "MLFLOW_TRACKING_URI = \"http://localhost:35000\"" @@ -221,7 +221,7 @@ "source": [ "def wait_for_job(job, job_name, quiet=False):\n", " n = 0\n", - " while job['status'] != 'finished': \n", + " while job['status'] not in ['finished', 'failed']:\n", " job = client.jobs.get_by_id(job['id'])\n", " time.sleep(1)\n", " if not quiet:\n", @@ -229,8 +229,11 @@ " display(\"Waiting for job.\" + \".\" * (n % 3) )\n", " n += 1\n", " if not quiet:\n", - " clear_output(wait=True)\n", - " display(f\"Job finished. Starting {job_name} job.\")\n", + " if job['status'] == 'finished':\n", + " clear_output(wait=True)\n", + " display(f\"Job finished. Starting {job_name} job.\")\n", + " else:\n", + " raise Exception(\"Previous job failed. Please see tensorflow-cpu logs for details.\")\n", " " ] }, @@ -251,7 +254,7 @@ "experiment_id, train_ep, queue_id = upload_experiment(client, 'src/train.yml','train','training a classifier on MNIST', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", "experiment_id, fgm_ep, queue_id = upload_experiment(client, 'src/fgm.yml','fgm','generating examples on mnist_classifier using the fgm attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", "experiment_id, infer_ep, queue_id = upload_experiment(client, 'src/infer.yml','infer','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", - "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" + "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n" ] }, { diff --git a/examples/mnist-classifier-demo/src/fgm.yml b/examples/mnist-classifier-demo/src/fgm.yml index 66fb7fa15..ed4433043 100644 --- a/examples/mnist-classifier-demo/src/fgm.yml +++ b/examples/mnist-classifier-demo/src/fgm.yml @@ -104,20 +104,21 @@ tasks: - name: art type: boolean required: false + - name: image_size + type: any + required: false - name: classifier_kwargs type: kwargs required: false outputs: classifier: classifier - attack: - plugin: dioptra_custom.fgm_mnist_demo.plugins.attack + attack_fgm: + plugin: dioptra_custom.fgm_mnist_demo.plugins.attack_fgm inputs: - dataset: any - - data_dir: string - adv_data_dir: path_string - classifier: any - - image_size: image_size - distance_metrics: distance_metrics_requests - name: rescale type: number @@ -125,9 +126,6 @@ tasks: - name: batch_size type: integer 
required: false - - name: label_mode - type: string - required: false - name: eps type: number required: false @@ -140,9 +138,6 @@ tasks: - name: norm type: norm required: false - - name: file_format_kwargs - type: kwargs_null - required: false outputs: ret: artifact save_artifacts_and_models: @@ -174,12 +169,10 @@ graph: fgm: - attack: + attack_fgm: dataset: $dataset.testing - data_dir: $data_dir adv_data_dir: $adv_data_dir classifier: $model - image_size: $image_size batch_size: $batch_size eps: $eps eps_step: $eps_step @@ -212,4 +205,6 @@ graph: file_name: $distance_metrics_filename file_format: csv.gz file_format_kwargs: - index: false \ No newline at end of file + index: false + dependencies: + - fgm \ No newline at end of file diff --git a/examples/mnist-classifier-demo/unused/patch_gen.yml b/examples/mnist-classifier-demo/unused/patch_gen.yml new file mode 100644 index 000000000..724d35213 --- /dev/null +++ b/examples/mnist-classifier-demo/unused/patch_gen.yml @@ -0,0 +1,186 @@ +types: + path: + classifier: + artifact: + model_list: + list: classifier + artifact_list: + list: artifact + path_string: + union: [string, path] + kwargs: + mapping: [string, any] + kwargs_null: + union: [kwargs, "null"] + distance_metric_request: + mapping: [string, string] + distance_metrics_requests: + list: distance_metric_request + image_size: + tuple: [integer, integer, integer] + clip_values: + tuple: [number, number, number] + norm: + union: [integer, number, string] + str_null: + union: [string, "null"] + list_str_null: + list: str_null + num_null: + union: [number, "null"] + directory_iterator: + name_parameters: + mapping: + name: string + parameters: + mapping: [string, any] + metrics_list: + list: name_parameters + +parameters: + data_dir: /dioptra/data/Mnist/testing + image_size: [28, 28, 1] + adv_tar_name: adversarial_patch.tar.gz + adv_data_dir: adv_patches + model_name: mnist_classifier + model_version: -1 + clip_values: [0, 1] + seed: -1 + patch_target: -1 + num_patch: 1 + rotation_max: 22.5 + scale_min: 0.1 + scale_max: 1.0 + learning_rate: 5.0 + max_iter: 500 + patch_shape: [2, 2, 1] + batch_size: 10 + +tasks: + load_dataset: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset + inputs: + - name: ep_seed + type: integer + required: false + - name: data_dir + type: string + required: false + - name: subsets + type: list_str_null + required: false + - name: image_size + type: image_size + required: false + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: shuffle + type: boolean + required: false + outputs: + - training: directory_iterator + - validation: directory_iterator + - testing: directory_iterator + + load_model: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_model + inputs: + - name: model_name + type: string + required: false + - name: model_version + type: integer + required: false + - name: imagenet_preprocessing + type: boolean + required: false + - name: art + type: boolean + required: false + - name: image_size + type: any + required: false + - name: classifier_kwargs + type: kwargs + required: false + outputs: + classifier: classifier + + attack_patch: + plugin: dioptra_custom.fgm_mnist_demo.plugins.attack_patch + inputs: + - data_flow: directory_iterator + - adv_data_dir: path_string + - model: any + - patch_target: integer + - num_patch: integer + - num_patch_samples: integer + 
- rotation_max: number + - scale_min: number + - scale_max: number + - learning_rate: number + - max_iter: integer + - patch_shape: any + + save_artifacts_and_models: + plugin: dioptra_custom.fgm_mnist_demo.plugins.save_artifacts_and_models + inputs: + - name: artifacts + type: artifact_list + required: false + - name: models + type: model_list + required: false + +graph: + dataset: + load_dataset: + ep_seed: $seed + data_dir: $data_dir + subsets: [testing] + image_size: $image_size + batch_size: $batch_size + + model: + load_model: + model_name: $model_name + model_version: $model_version + art: true + image_size: $image_size + classifier_kwargs: + clip_values: $clip_values + + + gen_patches: + attack_patch: + data_flow: $dataset.testing + adv_data_dir: $adv_data_dir + model: $model + patch_target: $patch_target + num_patch: $num_patch + num_patch_samples: $batch_size + rotation_max: $rotation_max + scale_min: $scale_min + scale_max: $scale_max + learning_rate: $learning_rate + max_iter: $max_iter + patch_shape: $patch_shape + + save: + save_artifacts_and_models: + artifacts: + - type: tarball + adv_data_dir: $adv_data_dir + adv_tar_name: $adv_tar_name + dependencies: + - gen_patches \ No newline at end of file diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py index 3aa5c3493..7f513c099 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_fgm.py @@ -79,14 +79,10 @@ @require_package("tensorflow", exc_type=TensorflowDependencyError) def fgm( data_flow: Any, - data_dir: str, adv_data_dir: Union[str, Path], keras_classifier: TensorFlowV2Classifier, - image_size: Tuple[int, int, int], distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, - rescale: float = 1.0 / 255, batch_size: int = 32, - label_mode: str = "categorical", eps: float = 0.3, eps_step: float = 0.1, minimal: bool = False, @@ -150,7 +146,6 @@ def fgm( minimal=minimal, norm=norm, ) - print(data_flow) num_images = data_flow.n img_filenames = [Path(x) for x in data_flow.filenames] diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py new file mode 100644 index 000000000..93a118912 --- /dev/null +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py @@ -0,0 +1,281 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from __future__ import annotations + +from pathlib import Path +from typing import Callable, Dict, List, Optional, Tuple, Union, Any + +import mlflow +import numpy as np +import pandas as pd +import scipy.stats +import structlog +from prefect import task +from structlog.stdlib import BoundLogger + +from dioptra import pyplugs +from dioptra.sdk.exceptions import ARTDependencyError, TensorflowDependencyError +from dioptra.sdk.utilities.decorators import require_package + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +try: + from art.attacks.evasion import AdversarialPatch + from art.estimators.classification import TensorFlowV2Classifier + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="art", + ) + + +try: + from tensorflow.keras.preprocessing.image import ImageDataGenerator, save_img + +except ImportError: # pragma: nocover + LOGGER.warn( + "Unable to import one or more optional packages, functionality may be reduced", + package="tensorflow", + ) + + +@pyplugs.register +@require_package("art", exc_type=ARTDependencyError) +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def create_adversarial_patches( + data_flow: Any, + adv_data_dir: Union[str, Path], + keras_classifier: TensorFlowV2Classifier, + patch_target: int, + num_patch: int, + num_patch_samples: int, + rotation_max: float, + scale_min: float, + scale_max: float, + learning_rate: float, + max_iter: int, + patch_shape: Tuple, +) -> pd.DataFrame: + adv_data_dir = Path(adv_data_dir) + batch_size = num_patch_samples + + attack = _init_patch( + keras_classifier=keras_classifier, + batch_size=batch_size, + rotation_max=rotation_max, + scale_min=scale_min, + scale_max=scale_max, + learning_rate=learning_rate, + max_iter=max_iter, + patch_shape=patch_shape, + ) + + # Start by generating adversarial patches. + target_index = patch_target + patch_list = [] + mask_list = [] + id_list = [] + n_classes = len(data_flow.class_indices) + + LOGGER.info( + "Generate adversarial patches", + attack="patch", + num_patches=num_patch, + ) + + for batch_num, (x, y) in enumerate(data_flow): + # Generate random index from available classes. + if patch_target < 0: + target_index = np.random.randint(0, n_classes) + id_list.append(target_index) + y_one_hot = np.zeros(n_classes) + y_one_hot[target_index] = 1.0 + y_target = np.tile(y_one_hot, (x.shape[0], 1)) + + if batch_num >= num_patch: + break + patch, patch_mask = attack.generate(x=x, y=y_target) + patch_list.append(patch) + mask_list.append(patch_mask) + + # Save adversarial patches. 
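+ # The patch, mask, and target-id arrays are persisted as .npy files by the + # _save_adv_patch helper below, which also renders each masked patch to a PNG.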
+ _save_adv_patch(patch_list, mask_list, id_list, num_patch, adv_data_dir) + LOGGER.info("Adversarial patch generation complete", attack="patch") + + return + + +@pyplugs.register +@require_package("art", exc_type=ARTDependencyError) +@require_package("tensorflow", exc_type=TensorflowDependencyError) +def create_adversarial_patch_dataset( + data_flow: Any, + adv_data_dir: Union[str, Path], + patch_dir: str, + keras_classifier: TensorFlowV2Classifier, + patch_shape: Tuple, + distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, + batch_size: int = 32, + patch_scale: float = 0.4, + rotation_max: float = 22.5, + scale_min: float = 0.1, + scale_max: float = 1.0, +) -> pd.DataFrame: + distance_metrics_list = distance_metrics_list or [] + adv_data_dir = Path(adv_data_dir) + + patch_list = np.load((patch_dir / "patch_list.npy").resolve()) + + attack = _init_patch( + keras_classifier=keras_classifier, + batch_size=batch_size, + rotation_max=rotation_max, + scale_min=scale_min, + scale_max=scale_max, + patch_shape=patch_shape, + ) + + num_images = data_flow.n + img_filenames = [Path(x) for x in data_flow.filenames] + class_names_list = sorted(data_flow.class_indices, key=data_flow.class_indices.get) + + distance_metrics_: Dict[str, List[List[float]]] = {"image": [], "label": []} + for metric_name, _ in distance_metrics_list: + distance_metrics_[metric_name] = [] + + LOGGER.info( + "Generate adversarial images", + attack="patch", + num_batches=num_images // batch_size, + ) + + converted_patch_list = list(patch_list) + # Apply patch over test set. + for batch_num, (x, y) in enumerate(data_flow): + if batch_num >= num_images // batch_size: + break + LOGGER.info( + "Generate adversarial image batch", + attack="patch", + batch_num=batch_num, + ) + patch_index = np.random.randint(len(converted_patch_list)) + patch = converted_patch_list[patch_index] + y_int = np.argmax(y, axis=1) + if patch_scale > 0: + adv_batch = attack.apply_patch(x, scale=patch_scale, patch_external=patch) + else: + adv_batch = attack.apply_patch(x, patch_external=patch) + + clean_filenames = img_filenames[ + batch_num * batch_size : (batch_num + 1) * batch_size + ] + + _save_batch( + adv_batch, adv_data_dir, y_int, clean_filenames, class_names_list, "adv" + ) + + _evaluate_distance_metrics( + clean_filenames=clean_filenames, + distance_metrics_=distance_metrics_, + clean_batch=x, + adv_batch=adv_batch, + distance_metrics_list=distance_metrics_list, + ) + + LOGGER.info("Adversarial image generation complete", attack="patch") + _log_distance_metrics(distance_metrics_) + return pd.DataFrame(distance_metrics_) + + +def _init_patch( + keras_classifier: TensorFlowV2Classifier, batch_size: int, **kwargs +) -> AdversarialPatch: + attack = AdversarialPatch( + classifier=keras_classifier, batch_size=batch_size, **kwargs + ) + return attack + + +def _save_adv_patch(patch_list, mask_list, id_list, num_patch, adv_patch_dir): + patch_list = np.array(patch_list) + mask_list = np.array(mask_list) + id_list = np.array(id_list) + + np.save(str(adv_patch_dir) + "/patch_list", patch_list) + np.save(str(adv_patch_dir) + "/patch_mask_list", mask_list) + np.save(str(adv_patch_dir) + "/patch_id_list", id_list) + + for patch_id in range(num_patch): + patch = patch_list[patch_id] + mask = mask_list[patch_id] + + # Combine patch with mask. + masked_patch = patch * mask + + # Save masked patch as image. 
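+ # (patch * mask above zeroes every pixel outside the patch region, so the + # saved PNG shows the adversarial patch alone.)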
+ adv_patch_path = adv_patch_dir / f"Patch_{patch_id}.png" + + if not adv_patch_path.parent.exists(): + adv_patch_path.parent.mkdir(parents=True) + + save_img(path=str(adv_patch_path), x=masked_patch) + + +def _save_batch( + adv_batch, adv_data_dir, y, clean_filenames, class_names_list, type +) -> None: + for batch_image_num, adv_image in enumerate(adv_batch): + out_label = class_names_list[y[batch_image_num]] + adv_image_path = ( + adv_data_dir + / f"{out_label}" + / f"{type}_{clean_filenames[batch_image_num].name}" + ) + + if not adv_image_path.parent.exists(): + adv_image_path.parent.mkdir(parents=True) + + save_img(path=str(adv_image_path), x=adv_image) + + +def _evaluate_distance_metrics( + clean_filenames, distance_metrics_, clean_batch, adv_batch, distance_metrics_list +) -> None: + LOGGER.debug("evaluate image perturbations using distance metrics") + distance_metrics_["image"].extend([x.name for x in clean_filenames]) + distance_metrics_["label"].extend([x.parent for x in clean_filenames]) + for metric_name, metric in distance_metrics_list: + distance_metrics_[metric_name].extend(metric(clean_batch, adv_batch)) + + +def _log_distance_metrics(distance_metrics_: Dict[str, List[List[float]]]) -> None: + distance_metrics_ = distance_metrics_.copy() + del distance_metrics_["image"] + del distance_metrics_["label"] + for metric_name, metric_values_list in distance_metrics_.items(): + metric_values = np.array(metric_values_list) + mlflow.log_metric(key=f"{metric_name}_mean", value=metric_values.mean()) + mlflow.log_metric(key=f"{metric_name}_median", value=np.median(metric_values)) + mlflow.log_metric(key=f"{metric_name}_stdev", value=metric_values.std()) + mlflow.log_metric( + key=f"{metric_name}_iqr", value=scipy.stats.iqr(metric_values) + ) + mlflow.log_metric(key=f"{metric_name}_min", value=metric_values.min()) + mlflow.log_metric(key=f"{metric_name}_max", value=metric_values.max()) + LOGGER.info("logged distance-based metric", metric_name=metric_name) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py index 08db3cd80..f4880d0c2 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py @@ -25,6 +25,9 @@ import scipy.stats import structlog from structlog.stdlib import BoundLogger +from tensorflow.keras.preprocessing.image import ( + DirectoryIterator +) from dioptra import pyplugs from .tensorflow import get_optimizer, get_model_callbacks, get_performance_metrics, evaluate_metrics_tensorflow @@ -44,6 +47,7 @@ from .attacks_fgm import fgm from .artifacts_mlflow import upload_directory_as_tarball_artifact, upload_data_frame_artifact, download_all_artifacts from .defenses_image_preprocessing import create_defended_dataset +from .attacks_patch import create_adversarial_patches, create_adversarial_patch_dataset LOGGER: BoundLogger = structlog.stdlib.get_logger() @@ -125,11 +129,12 @@ def load_model( model_version: int | None = None, imagenet_preprocessing: bool = False, art: bool = False, + image_size: Any = None, classifier_kwargs: Optional[Dict[str, Any]] = None ): uri = get_uri_for_model(model_name, model_version) if (art): - classifier = load_wrapped_tensorflow_keras_classifier(uri, imagenet_preprocessing, classifier_kwargs) + classifier = load_wrapped_tensorflow_keras_classifier(uri, imagenet_preprocessing, image_size, classifier_kwargs) else: classifier = load_tensorflow_keras_classifier(uri) return classifier 
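A minimal usage sketch (not part of the diff) of the extended load_model call above, mirroring the wiring in patch_gen.yml; the literal argument values are that file's defaults, and model_version=-1 resolves to the latest registered version:

    classifier = load_model(
        model_name="mnist_classifier",
        model_version=-1,
        art=True,  # wrap the Keras model in ART's TensorFlowV2Classifier
        image_size=(28, 28, 1),  # MNIST shape; overrides keras_classifier.input_shape
        classifier_kwargs={"clip_values": (0, 1)},
    )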
@@ -194,33 +199,26 @@ def load_artifacts( extract_tarfile(extract) @pyplugs.register -def attack( +def attack_fgm( dataset: Any, - data_dir: str, adv_data_dir: Union[str, Path], classifier: Any, - image_size: Tuple[int, int, int], distance_metrics: List[Dict[str, str]], - rescale: float = 1.0 / 255, batch_size: int = 32, - label_mode: str = "categorical", eps: float = 0.3, eps_step: float = 0.1, minimal: bool = False, norm: Union[int, float, str] = np.inf, ): - make_directories([adv_data_dir] ) + '''generate fgm examples''' + make_directories([adv_data_dir]) distance_metrics_list = get_distance_metric_list(distance_metrics) fgm_dataset = fgm( data_flow=dataset, - data_dir=data_dir, adv_data_dir=adv_data_dir, keras_classifier=classifier, - image_size=image_size, distance_metrics_list=distance_metrics_list, - rescale=rescale, batch_size=batch_size, - label_mode=label_mode, eps=eps, eps_step=eps_step, minimal=minimal, @@ -228,6 +226,67 @@ def attack( ) return fgm_dataset +@pyplugs.register() +def attack_patch( + data_flow: Any, + adv_data_dir: Union[str, Path], + model: Any, + patch_target: int, + num_patch: int, + num_patch_samples: int, + rotation_max: float, + scale_min: float, + scale_max: float, + learning_rate: float, + max_iter: int, + patch_shape: Tuple, +): + '''generate patches''' + make_directories([adv_data_dir]) + create_adversarial_patches( + data_flow=data_flow, + adv_data_dir=adv_data_dir, + keras_classifier=model, + patch_target=patch_target, + num_patch=num_patch, + num_patch_samples=num_patch_samples, + rotation_max=rotation_max, + scale_min=scale_min, + scale_max=scale_max, + learning_rate=learning_rate, + max_iter=max_iter, + patch_shape=patch_shape, + ) + +@pyplugs.register() +def augment_patch( + data_flow: Any, + adv_data_dir: Union[str, Path], + patch_dir: str, + model: Any, + patch_shape: Tuple, + distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, + batch_size: int = 32, + patch_scale: float = 0.4, + rotation_max: float = 22.5, + scale_min: float = 0.1, + scale_max: float = 1.0, +): + '''add patches to a dataset''' + make_directories([adv_data_dir]) + create_adversarial_patch_dataset( + data_flow=data_flow, + adv_data_dir=adv_data_dir, + patch_dir=patch_dir, + keras_classifier=model, + patch_shape=patch_shape, + distance_metrics_list=distance_metrics_list, + batch_size=batch_size, + patch_scale=patch_scale, + rotation_max=rotation_max, + scale_min=scale_min, + scale_max=scale_max + ) @pyplugs.register def compute_metrics( classifier: Any, diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py index 7286cf002..f18444ca1 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/registry_art.py @@ -63,6 +63,7 @@ def load_wrapped_tensorflow_keras_classifier( artifact_uri: str, imagenet_preprocessing: bool = False, + image_size: Any = None, classifier_kwargs: Optional[Dict[str, Any]] = None, ) -> TensorFlowV2Classifier: """Loads and wraps a registered Keras classifier for compatibility with the |ART|. 
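The `attack_patch` and `augment_patch` plugins registered above split the Adversarial Patch workflow into a generation step and an application step. A minimal sketch of chaining them directly, assuming an ART-wrapped classifier and a Keras DirectoryIterator are already in hand; the hyperparameters echo the demo notebook (rotation_max=180, learning_rate=5.0, max_iter=5000) and the remaining values are illustrative:

# Hypothetical direct chaining of the two plugins; in the demo the
# equivalent wiring lives in the patch_gen.yml and patch_apply.yml entrypoints.
attack_patch(
    data_flow=testing_iterator,    # Keras DirectoryIterator over the test set
    adv_data_dir="adv_patches",    # receives the Patch_<n>.png files and patch list
    model=art_classifier,          # from load_model(..., art=True)
    patch_target=0,                # illustrative target class
    num_patch=1,
    num_patch_samples=10,
    rotation_max=180.0,
    scale_min=0.1,
    scale_max=1.0,
    learning_rate=5.0,
    max_iter=5000,
    patch_shape=(28, 28, 1),
)
augment_patch(
    data_flow=testing_iterator,
    adv_data_dir="adv_testing",    # patched copies of the test images land here
    patch_dir="adv_patches",       # reads the patches generated above
    model=art_classifier,
    patch_shape=(28, 28, 1),
    patch_scale=0.5,
    rotation_max=180.0,
)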
@@ -85,7 +86,7 @@ def load_wrapped_tensorflow_keras_classifier( uri=artifact_uri ) nb_classes = keras_classifier.output_shape[1] - input_shape = keras_classifier.input_shape + input_shape = keras_classifier.input_shape if image_size == None else image_size loss_object = losses.get(keras_classifier.loss) preprocessing = ( (np.array([103.939, 116.779, 123.680]), np.array([1.0, 1.0, 1.0])) From 73c7a329bcfb5ee143468c195341bb2944a9e7b2 Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Tue, 8 Oct 2024 16:24:02 -0400 Subject: [PATCH 08/18] examples: re-add patch attack (numpy) to mnist demo --- examples/mnist-classifier-demo/demo.ipynb | 354 ++++++++++++------ .../mnist-classifier-demo/src/patch_apply.yml | 213 +++++++++++ .../{unused => src}/patch_gen.yml | 0 .../fgm_mnist_demo/attacks_patch.py | 12 +- .../dioptra_custom/fgm_mnist_demo/plugins.py | 5 +- 5 files changed, 470 insertions(+), 114 deletions(-) create mode 100644 examples/mnist-classifier-demo/src/patch_apply.yml rename examples/mnist-classifier-demo/{unused => src}/patch_gen.yml (100%) diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index b84b1e7a4..38ddba7d2 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -19,7 +19,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Setup" + "### Setup" ] }, { @@ -107,11 +107,35 @@ " os.environ[\"MLFLOW_TRACKING_URI\"] = MLFLOW_TRACKING_URI" ] }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def wait_for_job(job, job_name, quiet=False):\n", + " n = 0\n", + " while job['status'] not in ['finished', 'failed']:\n", + " job = client.jobs.get_by_id(job['id'])\n", + " time.sleep(1)\n", + " if not quiet:\n", + " clear_output(wait=True)\n", + " display(\"Waiting for job.\" + \".\" * (n % 3) )\n", + " n += 1\n", + " if not quiet:\n", + " if job['status'] == 'finished':\n", + " clear_output(wait=True)\n", + " display(f\"Job finished. Starting {job_name} job.\")\n", + " else:\n", + " raise Exception(\"Previous job failed. Please see tensorflow-cpu logs for details.\")\n", + " " + ] + }, { "cell_type": "markdown", "metadata": {}, "source": [ - "## Dataset" + "### Dataset" ] }, { @@ -163,7 +187,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "## Submit and run jobs" + "### Login to Dioptra and setup RESTAPI client" ] }, { @@ -196,7 +220,9 @@ { "cell_type": "code", "execution_count": null, - "metadata": {}, + "metadata": { + "scrolled": true + }, "outputs": [], "source": [ "try:\n", @@ -210,7 +236,7 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "`wait_for_job` stalls til the previous job was finished, which is useful for jobs which depend on the output of other jobs." + "### Upload all the entrypoints in the src/ folder" ] }, { @@ -219,29 +245,20 @@ "metadata": {}, "outputs": [], "source": [ - "def wait_for_job(job, job_name, quiet=False):\n", - " n = 0\n", - " while job['status'] not in ['finished', 'failed']:\n", - " job = client.jobs.get_by_id(job['id'])\n", - " time.sleep(1)\n", - " if not quiet:\n", - " clear_output(wait=True)\n", - " display(\"Waiting for job.\" + \".\" * (n % 3) )\n", - " n += 1\n", - " if not quiet:\n", - " if job['status'] == 'finished':\n", - " clear_output(wait=True)\n", - " display(f\"Job finished. Starting {job_name} job.\")\n", - " else:\n", - " raise Exception(\"Previous job failed. 
Please see tensorflow-cpu logs for details.\")\n", - " " + "#delete_all(client)\n", + "experiment_id, train_ep, queue_id = upload_experiment(client, 'src/train.yml','train','training a classifier on MNIST', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, fgm_ep, queue_id = upload_experiment(client, 'src/fgm.yml','fgm','generating examples on mnist_classifier using the fgm attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, patch_gen_ep, queue_id = upload_experiment(client, 'src/patch_gen.yml','patch_gen','generating patches on mnist_classifier using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, patch_apply_ep, queue_id = upload_experiment(client, 'src/patch_apply.yml','patch_apply','applying patches to dataset using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, infer_ep, queue_id = upload_experiment(client, 'src/infer.yml','infer','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "In this step, we are just uploading all of our entrypoints and the plugins they rely on to the Dioptra server." + "### Train a new le_net model on MNIST" ] }, { @@ -250,20 +267,23 @@ "metadata": {}, "outputs": [], "source": [ - "#delete_all(client)\n", - "experiment_id, train_ep, queue_id = upload_experiment(client, 'src/train.yml','train','training a classifier on MNIST', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", - "experiment_id, fgm_ep, queue_id = upload_experiment(client, 'src/fgm.yml','fgm','generating examples on mnist_classifier using the fgm attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", - "experiment_id, infer_ep, queue_id = upload_experiment(client, 'src/infer.yml','infer','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", - "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n" + "job_time_limit = '1h'\n", + "\n", + "training_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id, \n", + " f\"training job for {experiment_id}\", \n", + " queue_id,\n", + " train_ep, \n", + " {\"epochs\":\"20\"}, \n", + " job_time_limit\n", + ")" ] }, { "cell_type": "markdown", "metadata": {}, "source": [ - "Next, we need to train our model. This particular entrypoint uses a LeNet-5 model.\n", - "Depending on the specs of your computer, it can take 5-20 minutes or longer to complete.\n", - "If you are fortunate enough to have access to a dedicated GPU, then the training time will be much shorter." 
+ "### Generate adversarial examples using FGM attack" ] }, { @@ -274,12 +294,13 @@ "source": [ "job_time_limit = '1h'\n", "\n", - "training_job = client.experiments.create_jobs_by_experiment_id(\n", - " experiment_id, \n", - " f\"training job for {experiment_id}\", \n", + "wait_for_job(training_job, 'fgm')\n", + "fgm_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id,\n", + " f\"fgm job for {experiment_id}\",\n", " queue_id,\n", - " train_ep, \n", - " {\"epochs\":\"1\"}, \n", + " fgm_ep,\n", + " {\"model_name\": MODEL_NAME, \"model_version\": str(-1)}, # -1 means get the latest model\n", " job_time_limit\n", ")" ] @@ -288,11 +309,41 @@ "cell_type": "markdown", "metadata": {}, "source": [ - "Now that we have trained a model, next we will apply the fast-gradient method (FGM) evasion attack on it to generate adversarial images.\n", - "\n", - "This specific workflow is an example of jobs that contain dependencies, as the metric evaluation jobs cannot start until the adversarial image generation jobs have completed, and the adversarial image generation job cannot start until the training job has completed.\n", + "### Generate patches based on the model and dataset" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "job_time_limit = '1h'\n", "\n", - "Note that the training_job id is needed to tell the FGM attack which model to generate examples against." + "experiment_id, patch_gen_ep, queue_id = upload_experiment(client, 'src/patch_gen.yml','patch_gen','generating patches on mnist_classifier using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "wait_for_job(training_job, 'patch_gen')\n", + "patch_gen_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id,\n", + " f\"patch generation job for {experiment_id}\",\n", + " queue_id,\n", + " patch_gen_ep,\n", + " {\"model_name\": MODEL_NAME,\n", + " \"model_version\": str(-1), # -1 means get the latest\n", + " \"rotation_max\": str(180),\n", + " \"max_iter\": str(5000),\n", + " \"learning_rate\": str(5.0),\n", + " },\n", + " job_time_limit\n", + ")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Generate adversarial examplesrated patches to the testing data" ] }, { @@ -303,13 +354,19 @@ "source": [ "job_time_limit = '1h'\n", "\n", - "wait_for_job(training_job, 'fgm')\n", - "fgm_job = client.experiments.create_jobs_by_experiment_id(\n", + "experiment_id, patch_apply_ep, queue_id = upload_experiment(client, 'src/patch_apply.yml','patch_apply','applying patches to dataset using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "wait_for_job(training_job, 'patch_apply')\n", + "patch_apply_job = client.experiments.create_jobs_by_experiment_id(\n", " experiment_id,\n", - " f\"fgm job for {experiment_id}\",\n", + " f\"patch generation job for {experiment_id}\",\n", " queue_id,\n", - " fgm_ep,\n", - " {\"model_name\": MODEL_NAME, \"model_version\": str(-1)}, # -1 means get the latest\n", + " patch_apply_ep,\n", + " {\"model_name\": MODEL_NAME, \n", + " \"model_version\": str(-1), # -1 means get the latest model\n", + " \"job_id\": str(patch_gen_job['id']),# we need the patches we just generated too\n", + " \"patch_scale\": str(0.5),\n", + " \"rotation_max\": str(180),\n", + " }, \n", " job_time_limit\n", ")" ] @@ -318,9 +375,7 @@ "cell_type": "markdown", "metadata": {}, 
"source": [ - "Finally, we can test out the results of our adversarial attack on the model we trained earlier. This will wait for the FGM job to finish, and then evaluate the model's performance on the adversarial examples. Note that we need to know both the `fgm_job` id as well as the `training_job` id, so that this entrypoint knows which run's adversarial examples to test against which model. \n", - "\n", - "The previous runs are all stored in Dioptra as well, so you can always go back later and retrieve examples, models, and even the code used to create them." + "### Helper functions to submit infer & defend jobs" ] }, { @@ -329,9 +384,9 @@ "metadata": {}, "outputs": [], "source": [ - "def infer(experiment_id, queue_id, infer_ep, prev_job, job_time_limit='1h', defense=False):\n", - " dd = \"def_testing\" if defense else \"adv_testing\"\n", - " tn = \"testing_adversarial_def.tar.gz\" if defense else \"testing_adversarial_fgm.tar.gz\"\n", + "def infer(experiment_id, queue_id, infer_ep, prev_job, job_time_limit='1h', adv=\"def\"):\n", + " dd = \"def_testing\" if adv == \"def\" else \"adv_testing\"\n", + " tn = f\"testing_adversarial_{adv}.tar.gz\"\n", " wait_for_job(prev_job, 'infer', quiet=False)\n", " infer_job = client.experiments.create_jobs_by_experiment_id(\n", " experiment_id,\n", @@ -370,7 +425,51 @@ "metadata": {}, "outputs": [], "source": [ - "infer_fgm = infer(experiment_id, queue_id, infer_ep, fgm_job, defense=False)" + "def defend(experiment_id, queue_id, defense_ep, prev_job, defense=\"spatial_smoothing\", adv=\"adv\", defense_kwargs=None, job_time_limit='1h'):\n", + " defense_kwargs = {} if defense_kwargs is None else defense_kwargs\n", + " tn = f\"testing_adversarial_{adv}.tar.gz\"\n", + " wait_for_job(prev_job, defense + ' defense')\n", + " def_job = client.experiments.create_jobs_by_experiment_id(\n", + " experiment_id,\n", + " f\"defense job for {experiment_id}\",\n", + " queue_id,\n", + " defense_ep,\n", + " {\n", + " \"job_id\": str(prev_job['id']),\n", + " \"def_type\":defense,\n", + " \"adv_tar_name\": tn,\n", + " \"defense_kwargs\": json.dumps(defense_kwargs)\n", + " }, \n", + " job_time_limit\n", + " )\n", + " return def_job" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run Spatial Smoothing, JPEG Compression, Gaussian Defense against FGM" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "infer_fgm = infer(experiment_id, queue_id, infer_ep, fgm_job, adv=\"fgm\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "spatial_job_fgm = defend(experiment_id, queue_id, defense_ep, fgm_job, defense=\"spatial_smoothing\", adv=\"fgm\")" ] }, { @@ -379,7 +478,25 @@ "metadata": {}, "outputs": [], "source": [ - "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" + "infer_spatial_fgm = infer(experiment_id, queue_id, infer_ep, spatial_job_fgm, adv=\"def\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "jpeg_comp_job_fgm = defend(experiment_id, queue_id, defense_ep, fgm_job, defense=\"jpeg_compression\", adv=\"fgm\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "infer_jpeg_fgm = infer(experiment_id, queue_id, infer_ep, 
jpeg_comp_job_fgm, adv=\"def\")" ] }, { @@ -390,19 +507,13 @@ }, "outputs": [], "source": [ - "job_time_limit = '1h'\n", - "wait_for_job(fgm_job, 'defense')\n", - "spatial_job = client.experiments.create_jobs_by_experiment_id(\n", - " experiment_id,\n", - " f\"defense job for {experiment_id}\",\n", - " queue_id,\n", - " defense_ep,\n", - " {\n", - " \"job_id\": str(fgm_job['id']),\n", - " \"def_type\":\"spatial_smoothing\",\n", - " \"defense_kwargs\": json.dumps({})\n", - " }, \n", - " job_time_limit\n", + "gaussian_job_fgm = defend(experiment_id, queue_id, defense_ep, fgm_job, defense=\"gaussian_augmentation\", adv=\"fgm\", defense_kwargs={\n", + " \"augmentation\": False,\n", + " \"ratio\": 1,\n", + " \"sigma\": .1,\n", + " \"apply_fit\": False,\n", + " \"apply_predict\": True\n", + " }\n", ")" ] }, @@ -412,7 +523,14 @@ "metadata": {}, "outputs": [], "source": [ - "infer_spatial = infer(experiment_id, queue_id, infer_ep, spatial_job, defense=True)" + "infer_gaussian_fgm = infer(experiment_id, queue_id, infer_ep, gaussian_job_fgm, adv=\"def\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Run Spatial Smoothing, JPEG Compression, Gaussian Defense against Patch Attack" ] }, { @@ -421,20 +539,18 @@ "metadata": {}, "outputs": [], "source": [ - "job_time_limit = '1h'\n", - "wait_for_job(fgm_job, 'defense')\n", - "jpeg_comp_job = client.experiments.create_jobs_by_experiment_id(\n", - " experiment_id,\n", - " f\"defense job for {experiment_id}\",\n", - " queue_id,\n", - " defense_ep,\n", - " {\n", - " \"job_id\": str(fgm_job['id']),\n", - " \"def_type\":\"jpeg_compression\",\n", - " \"defense_kwargs\": json.dumps({})\n", - " },\n", - " job_time_limit\n", - ")" + "infer_patch = infer(experiment_id, queue_id, infer_ep, patch_apply_job, adv=\"patch\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": { + "scrolled": true + }, + "outputs": [], + "source": [ + "spatial_job_patch = defend(experiment_id, queue_id, defense_ep, patch_apply_job, defense=\"spatial_smoothing\", adv=\"patch\")" ] }, { @@ -443,7 +559,25 @@ "metadata": {}, "outputs": [], "source": [ - "infer_jpeg = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job, defense=True)" + "infer_spatial_patch = infer(experiment_id, queue_id, infer_ep, spatial_job_patch, adv=\"def\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "jpeg_comp_job_patch = defend(experiment_id, queue_id, defense_ep, patch_apply_job, defense=\"jpeg_compression\", adv=\"patch\")" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "infer_jpeg_patch = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job_patch, adv=\"def\")" ] }, { @@ -454,25 +588,13 @@ }, "outputs": [], "source": [ - "job_time_limit = '1h'\n", - "wait_for_job(fgm_job, 'defense')\n", - "gaussian_job = client.experiments.create_jobs_by_experiment_id(\n", - " experiment_id,\n", - " f\"defense job for {experiment_id}\",\n", - " queue_id,\n", - " defense_ep,\n", - " {\n", - " \"job_id\": str(fgm_job['id']),\n", - " \"def_type\":\"gaussian_augmentation\",\n", - " \"defense_kwargs\": json.dumps({\n", + "gaussian_job_patch = defend(experiment_id, queue_id, defense_ep, patch_apply_job, defense=\"gaussian_augmentation\", adv=\"patch\", defense_kwargs={\n", " \"augmentation\": False,\n", " \"ratio\": 1,\n", - " \"sigma\": 1,\n", + " \"sigma\": .1,\n", " \"apply_fit\": False,\n", " \"apply_predict\": True\n", - " })\n", - " 
}, \n", - " job_time_limit\n", + " }\n", ")" ] }, @@ -482,7 +604,14 @@ "metadata": {}, "outputs": [], "source": [ - "infer_gaussian = infer(experiment_id, queue_id, infer_ep, gaussian_job, defense=True)" + "infer_gaussian_patch = infer(experiment_id, queue_id, infer_ep, gaussian_job_patch, adv=\"def\")" + ] + }, + { + "cell_type": "markdown", + "metadata": {}, + "source": [ + "### Display Metrics" ] }, { @@ -496,9 +625,13 @@ "metrics = {\n", " \"trained\": get_metrics(training_job),\n", " \"fgm\": get_metrics(infer_fgm),\n", - " \"jpeg\": get_metrics(infer_jpeg),\n", - " \"spatial\": get_metrics(infer_spatial),\n", - " \"gaussian\": get_metrics(infer_gaussian)\n", + " \"patch\": get_metrics(infer_patch),\n", + " \"jpeg_fgm\": get_metrics(infer_jpeg_fgm),\n", + " \"spatial_fgm\": get_metrics(infer_spatial_fgm),\n", + " \"gaussian_fgm\": get_metrics(infer_gaussian_fgm),\n", + " \"jpeg_patch\": get_metrics(infer_jpeg_patch),\n", + " \"spatial_patch\": get_metrics(infer_spatial_patch),\n", + " \"gaussian_patch\": get_metrics(infer_gaussian_patch)\n", "}\n", "\n", "pp = pprint.PrettyPrinter(depth=4)\n", @@ -517,16 +650,25 @@ "scenarios = [\n", " 'Base Model',\n", " 'Fast Gradient Method (Attack)',\n", - " 'JPEG Compression (Defense)',\n", - " 'Spatial Smoothing (Defense)',\n", - " 'Gaussian Noise (Defense)'\n", + " 'JPEG Compression vs. FGM (Defense)',\n", + " 'Spatial Smoothing vs. FGM (Defense)',\n", + " 'Gaussian Noise vs. FGM (Defense)',\n", + " 'Adversarial Patch (Attack)',\n", + " 'JPEG Compression vs. Patch (Defense)',\n", + " 'Spatial Smoothing vs. Patch (Defense)',\n", + " 'Gaussian Noise vs. Patch (Defense)'\n", + "\n", "]\n", "values = [\n", " metrics['trained']['accuracy'] * 100,\n", " metrics['fgm']['accuracy'] * 100,\n", - " metrics['jpeg']['accuracy'] * 100,\n", - " metrics['spatial']['accuracy'] * 100,\n", - " metrics['gaussian']['accuracy'] * 100,\n", + " metrics['jpeg_fgm']['accuracy'] * 100,\n", + " metrics['spatial_fgm']['accuracy'] * 100,\n", + " metrics['gaussian_fgm']['accuracy'] * 100,\n", + " metrics['patch']['accuracy'] * 100,\n", + " metrics['jpeg_patch']['accuracy'] * 100,\n", + " metrics['spatial_patch']['accuracy'] * 100,\n", + " metrics['gaussian_patch']['accuracy'] * 100,\n", "]\n", "\n", "fig, ax = plt.subplots(figsize =(16, 9))\n", diff --git a/examples/mnist-classifier-demo/src/patch_apply.yml b/examples/mnist-classifier-demo/src/patch_apply.yml new file mode 100644 index 000000000..366e5d6b4 --- /dev/null +++ b/examples/mnist-classifier-demo/src/patch_apply.yml @@ -0,0 +1,213 @@ +types: + path: + classifier: + artifact: + model_list: + list: classifier + artifact_list: + list: artifact + path_string: + union: [string, path] + list_path_string: + list: path_string + list_integer: + list: integer + kwargs: + mapping: [string, any] + kwargs_null: + union: [kwargs, "null"] + distance_metric_request: + mapping: [string, string] + distance_metrics_requests: + list: distance_metric_request + image_size: + tuple: [integer, integer, integer] + clip_values: + tuple: [number, number, number] + norm: + union: [integer, number, string] + str_null: + union: [string, "null"] + list_str_null: + list: str_null + num_null: + union: [number, "null"] + directory_iterator: + name_parameters: + mapping: + name: string + parameters: + mapping: [string, any] + metrics_list: + list: name_parameters + +parameters: + data_dir: /dioptra/data/Mnist/testing + image_size: [28, 28, 1] + patch_dir: adv_patches + adv_patch_tar_name: adversarial_patch.tar.gz + adv_data_dir: adv_testing + 
adv_dataset_tar_name: testing_adversarial_patch.tar.gz + job_id: + model_name: mnist_classifier + model_version: -1 + clip_values: [0, 1] + seed: -1 + rotation_max: 22.5 + scale_min: 0.1 + scale_max: 1.0 + patch_scale: .4 + patch_shape: [2, 2, 1] + batch_size: 10 + +tasks: + load_artifacts_for_job: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job + inputs: + - job_id: string + - name: extract_files + type: list_path_string + required: false + load_dataset: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset + inputs: + - name: ep_seed + type: integer + required: false + - name: data_dir + type: string + required: false + - name: subsets + type: list_str_null + required: false + - name: image_size + type: image_size + required: false + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: shuffle + type: boolean + required: false + outputs: + - training: directory_iterator + - validation: directory_iterator + - testing: directory_iterator + + load_model: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_model + inputs: + - name: model_name + type: string + required: false + - name: model_version + type: integer + required: false + - name: imagenet_preprocessing + type: boolean + required: false + - name: art + type: boolean + required: false + - name: image_size + type: any + required: false + - name: classifier_kwargs + type: kwargs + required: false + outputs: + classifier: classifier + + augment_patch: + plugin: dioptra_custom.fgm_mnist_demo.plugins.augment_patch + inputs: + - data_flow: directory_iterator + - adv_data_dir: path_string + - patch_dir: path_string + - model: any + - patch_shape: list_integer + - distance_metrics: distance_metrics_requests + - batch_size: integer + - patch_scale: number + - rotation_max: number + - scale_min: number + - scale_max: number + + save_artifacts_and_models: + plugin: dioptra_custom.fgm_mnist_demo.plugins.save_artifacts_and_models + inputs: + - name: artifacts + type: artifact_list + required: false + - name: models + type: model_list + required: false + +graph: + load: + load_artifacts_for_job: + job_id: $job_id + extract_files: [$adv_patch_tar_name] + + dataset: + load_dataset: + ep_seed: $seed + data_dir: $data_dir + subsets: [testing] + image_size: $image_size + batch_size: $batch_size + + model: + load_model: + model_name: $model_name + model_version: $model_version + art: true + image_size: $image_size + classifier_kwargs: + clip_values: $clip_values + + apply_patches: + augment_patch: + data_flow: $dataset.testing + adv_data_dir: $adv_data_dir + patch_dir: $patch_dir + model: $model + patch_shape: $patch_shape + distance_metrics: + - name: l_infinity_norm + func: l_inf_norm + - name: l_1_norm + func: l_1_norm + - name: l_2_norm + func: l_2_norm + - name: cosine_similarity + func: paired_cosine_similarities + - name: euclidean_distance + func: paired_euclidean_distances + - name: manhattan_distance + func: paired_manhattan_distances + - name: wasserstein_distance + func: paired_wasserstein_distances + batch_size: $batch_size + patch_scale: $patch_scale + rotation_max: $rotation_max + scale_min: $scale_min + scale_max: $scale_max + + save: + save_artifacts_and_models: + artifacts: + - type: tarball + adv_data_dir: $adv_data_dir + adv_tar_name: $adv_dataset_tar_name + dependencies: + - apply_patches \ No newline at end of file diff --git 
a/examples/mnist-classifier-demo/unused/patch_gen.yml b/examples/mnist-classifier-demo/src/patch_gen.yml similarity index 100% rename from examples/mnist-classifier-demo/unused/patch_gen.yml rename to examples/mnist-classifier-demo/src/patch_gen.yml diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py index 93a118912..5ea92a3ee 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/attacks_patch.py @@ -34,7 +34,7 @@ LOGGER: BoundLogger = structlog.stdlib.get_logger() try: - from art.attacks.evasion import AdversarialPatch + from art.attacks.evasion import AdversarialPatchNumpy from art.estimators.classification import TensorFlowV2Classifier except ImportError: # pragma: nocover @@ -82,7 +82,7 @@ def create_adversarial_patches( scale_max=scale_max, learning_rate=learning_rate, max_iter=max_iter, - patch_shape=patch_shape, + #patch_shape=patch_shape, ) # Start by generating adversarial patches. @@ -139,7 +139,7 @@ def create_adversarial_patch_dataset( distance_metrics_list = distance_metrics_list or [] adv_data_dir = Path(adv_data_dir) - patch_list = np.load((patch_dir / "patch_list.npy").resolve()) + patch_list = np.load((Path(patch_dir) / "patch_list.npy").resolve()) attack = _init_patch( keras_classifier=keras_classifier, @@ -147,7 +147,7 @@ def create_adversarial_patch_dataset( rotation_max=rotation_max, scale_min=scale_min, scale_max=scale_max, - patch_shape=patch_shape, + #patch_shape=patch_shape, ) num_images = data_flow.n @@ -205,8 +205,8 @@ def create_adversarial_patch_dataset( def _init_patch( keras_classifier: TensorFlowV2Classifier, batch_size: int, **kwargs -) -> AdversarialPatch: - attack = AdversarialPatch( +) -> AdversarialPatchNumpy: + attack = AdversarialPatchNumpy( classifier=keras_classifier, batch_size=batch_size, **kwargs ) return attack diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py index f4880d0c2..6c1ba4e6c 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py @@ -262,10 +262,10 @@ def attack_patch( def augment_patch( data_flow: Any, adv_data_dir: Union[str, Path], - patch_dir: str, + patch_dir: Union[str, Path], model: Any, patch_shape: Tuple, - distance_metrics_list: Optional[List[Tuple[str, Callable[..., np.ndarray]]]] = None, + distance_metrics: List[Dict[str, str]], batch_size: int = 32, patch_scale: float = 0.4, rotation_max: float = 22.5, @@ -274,6 +274,7 @@ def augment_patch( ): '''add patches to a dataset''' make_directories([adv_data_dir]) + distance_metrics_list = get_distance_metric_list(distance_metrics) create_adversarial_patch_dataset( data_flow=data_flow, adv_data_dir=adv_data_dir, From 8067cbfcd40e22abad361b407006b1bef1a81b02 Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Wed, 9 Oct 2024 09:24:38 -0400 Subject: [PATCH 09/18] examples: fix typo in mnist example --- examples/mnist-classifier-demo/demo.ipynb | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index 38ddba7d2..722ff058a 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -343,7 +343,7 @@ "cell_type": "markdown", 
"metadata": {}, "source": [ - "### Generate adversarial examplesrated patches to the testing data" + "### Generate adversarial examples by attaching generated patches to the testing data" ] }, { From b026c82c8932b1a7d7b993d5cd73a3d8ff638269 Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:00:32 -0400 Subject: [PATCH 10/18] examples: changed infer to evaluate, added predict and metrics. --- examples/mnist-classifier-demo/demo.ipynb | 152 +++++---- .../mnist-classifier-demo/src/defense.yml | 13 +- .../src/{infer.yml => evaluate.yml} | 16 +- examples/mnist-classifier-demo/src/fgm.yml | 9 +- .../mnist-classifier-demo/src/metrics.yml | 159 ++++++++++ .../mnist-classifier-demo/src/patch_apply.yml | 10 +- .../mnist-classifier-demo/src/patch_gen.yml | 7 +- .../mnist-classifier-demo/src/predict.yml | 169 ++++++++++ examples/mnist-classifier-demo/src/train.yml | 16 +- examples/scripts/setup.py | 5 +- .../fgm_mnist_demo/artifacts_mlflow.py | 45 +++ .../fgm_mnist_demo/data_tensorflow.py | 49 +++ .../defenses_image_preprocessing.py | 3 +- .../fgm_mnist_demo/metrics_performance.py | 296 ++++++++++++++++++ .../dioptra_custom/fgm_mnist_demo/plugins.py | 81 ++++- .../fgm_mnist_demo/tensorflow.py | 6 +- 16 files changed, 940 insertions(+), 96 deletions(-) rename examples/mnist-classifier-demo/src/{infer.yml => evaluate.yml} (89%) create mode 100644 examples/mnist-classifier-demo/src/metrics.yml create mode 100644 examples/mnist-classifier-demo/src/predict.yml create mode 100644 examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_performance.py diff --git a/examples/mnist-classifier-demo/demo.ipynb b/examples/mnist-classifier-demo/demo.ipynb index 722ff058a..06b6fb2a3 100644 --- a/examples/mnist-classifier-demo/demo.ipynb +++ b/examples/mnist-classifier-demo/demo.ipynb @@ -250,8 +250,10 @@ "experiment_id, fgm_ep, queue_id = upload_experiment(client, 'src/fgm.yml','fgm','generating examples on mnist_classifier using the fgm attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", "experiment_id, patch_gen_ep, queue_id = upload_experiment(client, 'src/patch_gen.yml','patch_gen','generating patches on mnist_classifier using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", "experiment_id, patch_apply_ep, queue_id = upload_experiment(client, 'src/patch_apply.yml','patch_apply','applying patches to dataset using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", - "experiment_id, infer_ep, queue_id = upload_experiment(client, 'src/infer.yml','infer','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", - "experiment_id, defense_ep, queue_id = upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" + "experiment_id, evaluate_ep, queue_id = upload_experiment(client, 'src/evaluate.yml','evaluate','evaluating performance of mnist_classifier on generated fgm examples', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, predict_ep, queue_id = upload_experiment(client, 'src/predict.yml','predict','use mnist_classifier to predict output for a dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, defense_ep, queue_id = 
upload_experiment(client, 'src/defense.yml','defense','generating defended dataset', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", + "experiment_id, metrics_ep, queue_id = upload_experiment(client, 'src/metrics.yml','metrics','running metrics over predictions', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)" ] }, { @@ -274,7 +276,7 @@ " f\"training job for {experiment_id}\", \n", " queue_id,\n", " train_ep, \n", - " {\"epochs\":\"20\"}, \n", + " {\"epochs\":\"30\"}, \n", " job_time_limit\n", ")" ] @@ -322,7 +324,6 @@ "source": [ "job_time_limit = '1h'\n", "\n", - "experiment_id, patch_gen_ep, queue_id = upload_experiment(client, 'src/patch_gen.yml','patch_gen','generating patches on mnist_classifier using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", "wait_for_job(training_job, 'patch_gen')\n", "patch_gen_job = client.experiments.create_jobs_by_experiment_id(\n", " experiment_id,\n", @@ -354,7 +355,6 @@ "source": [ "job_time_limit = '1h'\n", "\n", - "experiment_id, patch_apply_ep, queue_id = upload_experiment(client, 'src/patch_apply.yml','patch_apply','applying patches to dataset using the Adversarial Patch attack', PLUGIN_FILES, QUEUE_NAME, QUEUE_DESC, EXPERIMENT_NAME, EXPERIMENT_DESC)\n", "wait_for_job(training_job, 'patch_apply')\n", "patch_apply_job = client.experiments.create_jobs_by_experiment_id(\n", " experiment_id,\n", @@ -384,22 +384,74 @@ "metadata": {}, "outputs": [], "source": [ - "def infer(experiment_id, queue_id, infer_ep, prev_job, job_time_limit='1h', adv=\"def\"):\n", - " dd = \"def_testing\" if adv == \"def\" else \"adv_testing\"\n", - " tn = f\"testing_adversarial_{adv}.tar.gz\"\n", - " wait_for_job(prev_job, 'infer', quiet=False)\n", - " infer_job = client.experiments.create_jobs_by_experiment_id(\n", + "def run_job(experiment_id, queue_id, ep, title, prev_job_id=False, latest_model=False, args=None, prev_job=None, job_time_limit='1h'):\n", + " if prev_job is not None:\n", + " wait_for_job(prev_job, title, quiet=False)\n", + " if prev_job_id:\n", + " args['job_id'] = str(prev_job['id'])\n", + " if latest_model:\n", + " args['model_name'] = MODEL_NAME \n", + " args['model_version'] = str(-1)\n", + " job = client.experiments.create_jobs_by_experiment_id(\n", " experiment_id,\n", - " f\"infer job for {experiment_id}\",\n", + " f\"{title} job for {experiment_id}\",\n", " queue_id,\n", - " infer_ep,\n", - " {\"job_id\": str(prev_job['id']),\n", - " \"tar_name\": tn,\n", - " \"data_dir\": dd,\n", - " \"model_name\": MODEL_NAME, \"model_version\": str(-1)}, # -1 means get the latest\n", + " ep,\n", + " args,\n", " job_time_limit\n", " )\n", - " return infer_job" + " return job\n", + "def get_prev_tar_file(adv=\"def\"):\n", + " dd = \"def_testing\" if adv == \"def\" else \"adv_testing\"\n", + " tn = f\"testing_adversarial_{adv}.tar.gz\"\n", + " return dd, tn\n" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def defend(experiment_id, queue_id, defense_ep, prev_job, defense=\"spatial_smoothing\", adv=\"adv\", defense_kwargs=None, job_time_limit='1h'):\n", + " defense_kwargs = {} if defense_kwargs is None else defense_kwargs\n", + " dd, tn = get_prev_tar_file(adv)\n", + " arg_dict = {\n", + " \"def_type\":defense,\n", + " \"adv_tar_name\": tn,\n", + " \"defense_kwargs\": json.dumps(defense_kwargs)\n", + " }\n", + " defense_job = run_job(experiment_id, queue_id, defense_ep, defense + ' defense', 
prev_job_id=True, args=arg_dict, prev_job=prev_job, job_time_limit=job_time_limit)\n", + " return defense_job" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def predict(experiment_id, queue_id, predict_ep, prev_job, job_time_limit='1h', adv=\"def\"):\n", + " dd, tn = get_prev_tar_file(adv)\n", + " arg_dict = {\n", + " \"tar_name\": tn,\n", + " \"data_dir\": dd,\n", + " #\"record_actual_class\": str(True), # add a column 'actual' representing the class with the highest probability\n", + " #\"record_target_class\": str(True) # add a column 'target' representing the original class of the image\n", + " } # Note: using both actual and target above removes the need to load the original dataset during metrics calculation\n", + " predict_job = run_job(experiment_id, queue_id, predict_ep, \"predict\", prev_job_id=True, latest_model=True, args=arg_dict, prev_job=prev_job, job_time_limit=job_time_limit)\n", + " return predict_job" + ] + }, + { + "cell_type": "code", + "execution_count": null, + "metadata": {}, + "outputs": [], + "source": [ + "def measure(experiment_id, queue_id, measure_ep, prev_job, job_time_limit='1h'):\n", + " metrics_job = run_job(experiment_id, queue_id, measure_ep, \"metrics\", prev_job_id=True, args={}, prev_job=prev_job, job_time_limit=job_time_limit)\n", + " return metrics_job" ] }, { @@ -419,32 +471,6 @@ " return mlflow_run.data.metrics" ] }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], - "source": [ - "def defend(experiment_id, queue_id, defense_ep, prev_job, defense=\"spatial_smoothing\", adv=\"adv\", defense_kwargs=None, job_time_limit='1h'):\n", - " defense_kwargs = {} if defense_kwargs is None else defense_kwargs\n", - " tn = f\"testing_adversarial_{adv}.tar.gz\"\n", - " wait_for_job(prev_job, defense + ' defense')\n", - " def_job = client.experiments.create_jobs_by_experiment_id(\n", - " experiment_id,\n", - " f\"defense job for {experiment_id}\",\n", - " queue_id,\n", - " defense_ep,\n", - " {\n", - " \"job_id\": str(prev_job['id']),\n", - " \"def_type\":defense,\n", - " \"adv_tar_name\": tn,\n", - " \"defense_kwargs\": json.dumps(defense_kwargs)\n", - " }, \n", - " job_time_limit\n", - " )\n", - " return def_job" - ] - }, { "cell_type": "markdown", "metadata": {}, @@ -458,7 +484,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_fgm = infer(experiment_id, queue_id, infer_ep, fgm_job, adv=\"fgm\")" + "predict_fgm = predict(experiment_id, queue_id, predict_ep, fgm_job, adv=\"fgm\")\n", + "measure_fgm = measure(experiment_id, queue_id, metrics_ep, predict_fgm)" ] }, { @@ -478,7 +505,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_spatial_fgm = infer(experiment_id, queue_id, infer_ep, spatial_job_fgm, adv=\"def\")" + "predict_spatial_fgm = predict(experiment_id, queue_id, predict_ep, spatial_job_fgm, adv=\"def\")\n", + "measure_spatial_fgm = measure(experiment_id, queue_id, metrics_ep, predict_spatial_fgm)" ] }, { @@ -496,7 +524,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_jpeg_fgm = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job_fgm, adv=\"def\")" + "predict_jpeg_comp_fgm = predict(experiment_id, queue_id, predict_ep, jpeg_comp_job_fgm, adv=\"def\")\n", + "measure_jpeg_comp_fgm = measure(experiment_id, queue_id, metrics_ep, predict_jpeg_comp_fgm)" ] }, { @@ -523,7 +552,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_gaussian_fgm = infer(experiment_id, queue_id, infer_ep, gaussian_job_fgm, adv=\"def\")" + 
"predict_gaussian_fgm = predict(experiment_id, queue_id, predict_ep, gaussian_job_fgm, adv=\"def\")\n", + "measure_gaussian_fgm = measure(experiment_id, queue_id, metrics_ep, predict_gaussian_fgm)" ] }, { @@ -539,7 +569,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_patch = infer(experiment_id, queue_id, infer_ep, patch_apply_job, adv=\"patch\")" + "predict_patch = predict(experiment_id, queue_id, predict_ep, patch_apply_job, adv=\"patch\")\n", + "measure_patch = measure(experiment_id, queue_id, metrics_ep, predict_patch)" ] }, { @@ -559,7 +590,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_spatial_patch = infer(experiment_id, queue_id, infer_ep, spatial_job_patch, adv=\"def\")" + "predict_spatial_patch = predict(experiment_id, queue_id, predict_ep, spatial_job_patch, adv=\"def\")\n", + "measure_spatial_patch = measure(experiment_id, queue_id, metrics_ep, predict_spatial_patch)" ] }, { @@ -577,7 +609,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_jpeg_patch = infer(experiment_id, queue_id, infer_ep, jpeg_comp_job_patch, adv=\"def\")" + "predict_jpeg_comp_patch = predict(experiment_id, queue_id, predict_ep, jpeg_comp_job_patch, adv=\"def\")\n", + "measure_jpeg_comp_patch = measure(experiment_id, queue_id, metrics_ep, predict_jpeg_comp_patch)" ] }, { @@ -604,7 +637,8 @@ "metadata": {}, "outputs": [], "source": [ - "infer_gaussian_patch = infer(experiment_id, queue_id, infer_ep, gaussian_job_patch, adv=\"def\")" + "predict_gaussian_patch = predict(experiment_id, queue_id, predict_ep, gaussian_job_patch, adv=\"def\")\n", + "measure_gaussian_patch = measure(experiment_id, queue_id, metrics_ep, predict_gaussian_patch)" ] }, { @@ -624,14 +658,14 @@ "\n", "metrics = {\n", " \"trained\": get_metrics(training_job),\n", - " \"fgm\": get_metrics(infer_fgm),\n", - " \"patch\": get_metrics(infer_patch),\n", - " \"jpeg_fgm\": get_metrics(infer_jpeg_fgm),\n", - " \"spatial_fgm\": get_metrics(infer_spatial_fgm),\n", - " \"gaussian_fgm\": get_metrics(infer_gaussian_fgm),\n", - " \"jpeg_patch\": get_metrics(infer_jpeg_patch),\n", - " \"spatial_patch\": get_metrics(infer_spatial_patch),\n", - " \"gaussian_patch\": get_metrics(infer_gaussian_patch)\n", + " \"fgm\": get_metrics(measure_fgm),\n", + " \"patch\": get_metrics(measure_patch),\n", + " \"jpeg_fgm\": get_metrics(measure_jpeg_comp_fgm),\n", + " \"spatial_fgm\": get_metrics(measure_spatial_fgm),\n", + " \"gaussian_fgm\": get_metrics(measure_gaussian_fgm),\n", + " \"jpeg_patch\": get_metrics(measure_jpeg_comp_patch),\n", + " \"spatial_patch\": get_metrics(measure_spatial_patch),\n", + " \"gaussian_patch\": get_metrics(measure_gaussian_patch)\n", "}\n", "\n", "pp = pprint.PrettyPrinter(depth=4)\n", diff --git a/examples/mnist-classifier-demo/src/defense.yml b/examples/mnist-classifier-demo/src/defense.yml index 6ca88e029..508dca6d3 100644 --- a/examples/mnist-classifier-demo/src/defense.yml +++ b/examples/mnist-classifier-demo/src/defense.yml @@ -51,7 +51,7 @@ parameters: model_name: mnist_classifier model_version: -1 clip_values: [0, 1] - batch_size: 32 + batch_size: 50 eps: 0.3 eps_step: 0.1 minimal: false @@ -65,6 +65,9 @@ tasks: plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job inputs: - job_id: string + - name: files + type: list_path_string + required: false - name: extract_files type: list_path_string required: false @@ -74,7 +77,10 @@ tasks: - name: ep_seed type: integer required: false - - name: data_dir + - name: training_dir + type: string + required: false + - name: testing_dir type: string 
required: false - name: subsets @@ -163,7 +169,8 @@ graph: dataset: load_dataset: ep_seed: $seed - data_dir: $adv_data_dir + testing_dir: $adv_data_dir + batch_size: $batch_size subsets: [testing] image_size: $image_size diff --git a/examples/mnist-classifier-demo/src/infer.yml b/examples/mnist-classifier-demo/src/evaluate.yml similarity index 89% rename from examples/mnist-classifier-demo/src/infer.yml rename to examples/mnist-classifier-demo/src/evaluate.yml index bc659e585..81665aba8 100644 --- a/examples/mnist-classifier-demo/src/infer.yml +++ b/examples/mnist-classifier-demo/src/evaluate.yml @@ -32,6 +32,9 @@ tasks: plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job inputs: - job_id: string + - name: files + type: list_path_string + required: false - name: extract_files type: list_path_string required: false @@ -41,7 +44,10 @@ tasks: - name: ep_seed type: integer required: false - - name: data_dir + - name: training_dir + type: string + required: false + - name: testing_dir type: string required: false - name: subsets @@ -91,8 +97,8 @@ tasks: outputs: classifier: classifier - compute_metrics: - plugin: dioptra_custom.fgm_mnist_demo.plugins.compute_metrics + model_metrics: + plugin: dioptra_custom.fgm_mnist_demo.plugins.model_metrics inputs: - classifier: classifier - dataset: directory_iterator @@ -106,7 +112,7 @@ graph: dataset: load_dataset: ep_seed: $seed - data_dir: $data_dir + testing_dir: $data_dir subsets: [testing] image_size: $image_size @@ -116,7 +122,7 @@ graph: model_version: $model_version metrics: - compute_metrics: + model_metrics: classifier: $model dataset: $dataset.testing dependencies: diff --git a/examples/mnist-classifier-demo/src/fgm.yml b/examples/mnist-classifier-demo/src/fgm.yml index ed4433043..16be4f7f9 100644 --- a/examples/mnist-classifier-demo/src/fgm.yml +++ b/examples/mnist-classifier-demo/src/fgm.yml @@ -46,7 +46,7 @@ parameters: model_name: mnist_classifier model_version: -1 clip_values: [0, 1] - batch_size: 32 + batch_size: 50 eps: 0.3 eps_step: 0.1 minimal: false @@ -60,7 +60,10 @@ tasks: - name: ep_seed type: integer required: false - - name: data_dir + - name: training_dir + type: string + required: false + - name: testing_dir type: string required: false - name: subsets @@ -154,7 +157,7 @@ graph: dataset: load_dataset: ep_seed: $seed - data_dir: $data_dir + testing_dir: $data_dir subsets: [testing] image_size: $image_size batch_size: $batch_size diff --git a/examples/mnist-classifier-demo/src/metrics.yml b/examples/mnist-classifier-demo/src/metrics.yml new file mode 100644 index 000000000..c87971de2 --- /dev/null +++ b/examples/mnist-classifier-demo/src/metrics.yml @@ -0,0 +1,159 @@ +types: + path: + classifier: + artifact: + performance_metric: + mapping: [string, string] + performance_metric_list: + list: performance_metric + model_list: + list: classifier + artifact_list: + list: artifact + num_null: + union: [number, "null"] + path_string: + union: [string, path] + list_path_string: + list: path_string + str_null: + union: [string, "null"] + list_str_null: + list: str_null + directory_iterator: + directory_iterator_null: + union: [directory_iterator, "null"] + kwargs: + mapping: [string, any] + metric_kwargs: + mapping: [string, kwargs] + image_size: + tuple: [integer, integer, integer] + ndarray: + metric_output: + mapping: [string, any] +parameters: + run_id: "" + image_size: [28, 28, 1] + data_dir: /dioptra/data/Mnist/testing + job_id: + predictions_filename: predictions.csv + predictions_format: csv + metrics_filename: 
metrics.csv + metrics_format: csv + n_classes: 10 + seed: -1 +tasks: + load_artifacts_for_job: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job + inputs: + - job_id: string + - name: files + type: list_path_string + required: false + - name: extract_files + type: list_path_string + required: false + outputs: + - paths: list_path_string + load_dataset: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset + inputs: + - name: ep_seed + type: integer + required: false + - name: training_dir + type: string + required: false + - name: testing_dir + type: string + required: false + - name: subsets + type: list_str_null + required: false + - name: image_size + type: image_size + required: false + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: shuffle + type: boolean + required: false + outputs: + - training: directory_iterator + - validation: directory_iterator + - testing: directory_iterator + + load_predictions: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_predictions + inputs: + - paths: list_path_string + - filename: string + - name: format + type: string + required: false + - name: dataset + type: directory_iterator_null + required: false + - name: n_classes + type: integer + required: false + outputs: + - y_true: ndarray + - y_pred: ndarray + + + prediction_metrics: + plugin: dioptra_custom.fgm_mnist_demo.plugins.prediction_metrics + inputs: + - y_true: ndarray + - y_pred: ndarray + - metrics_list: performance_metric_list + - name: func_kwargs + type: metric_kwargs + required: false + outputs: + - metric_results: metric_output + +graph: + predictions: + load_artifacts_for_job: + job_id: $job_id + files: [$predictions_filename] + + dataset: + load_dataset: + ep_seed: $seed + testing_dir: $data_dir + subsets: [testing] + image_size: $image_size + + format_predictions: + load_predictions: + paths: $predictions + filename: $predictions_filename + format: $predictions_format + dataset: $dataset.testing + n_classes: $n_classes + + metrics: + prediction_metrics: + y_true: $format_predictions.y_true + y_pred: $format_predictions.y_pred + metrics_list: + - name: accuracy + func: accuracy + - name: roc_auc + func: roc_auc + dependencies: + - format_predictions \ No newline at end of file diff --git a/examples/mnist-classifier-demo/src/patch_apply.yml b/examples/mnist-classifier-demo/src/patch_apply.yml index 366e5d6b4..456c1baa4 100644 --- a/examples/mnist-classifier-demo/src/patch_apply.yml +++ b/examples/mnist-classifier-demo/src/patch_apply.yml @@ -65,6 +65,9 @@ tasks: plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job inputs: - job_id: string + - name: files + type: list_path_string + required: false - name: extract_files type: list_path_string required: false @@ -74,7 +77,10 @@ tasks: - name: ep_seed type: integer required: false - - name: data_dir + - name: training_dir + type: string + required: false + - name: testing_dir type: string required: false - name: subsets @@ -161,7 +167,7 @@ graph: dataset: load_dataset: ep_seed: $seed - data_dir: $data_dir + testing_dir: $data_dir subsets: [testing] image_size: $image_size batch_size: $batch_size diff --git a/examples/mnist-classifier-demo/src/patch_gen.yml b/examples/mnist-classifier-demo/src/patch_gen.yml index 724d35213..34b965787 100644 --- a/examples/mnist-classifier-demo/src/patch_gen.yml +++ 
b/examples/mnist-classifier-demo/src/patch_gen.yml @@ -63,7 +63,10 @@ tasks: - name: ep_seed type: integer required: false - - name: data_dir + - name: training_dir + type: string + required: false + - name: testing_dir type: string required: false - name: subsets @@ -146,7 +149,7 @@ graph: dataset: load_dataset: ep_seed: $seed - data_dir: $data_dir + testing_dir: $data_dir subsets: [testing] image_size: $image_size batch_size: $batch_size diff --git a/examples/mnist-classifier-demo/src/predict.yml b/examples/mnist-classifier-demo/src/predict.yml new file mode 100644 index 000000000..990bc3558 --- /dev/null +++ b/examples/mnist-classifier-demo/src/predict.yml @@ -0,0 +1,169 @@ +types: + path: + classifier: + artifact: + model_list: + list: classifier + artifact_list: + list: artifact + num_null: + union: [number, "null"] + path_string: + union: [string, path] + list_path_string: + list: path_string + str_null: + union: [string, "null"] + list_str_null: + list: str_null + directory_iterator: + kwargs: + mapping: [string, any] + image_size: + tuple: [integer, integer, integer] + ndarray: +parameters: + run_id: "" + image_size: [28, 28, 1] + model_name: mnist_classifier + model_version: -1 + job_id: + tar_name: testing_adversarial_fgm.tar.gz + data_dir: adv_testing + filename: predictions.csv + record_actual_class: False + record_target_class: False + format: csv + seed: -1 + +tasks: + load_artifacts_for_job: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_artifacts_for_job + inputs: + - job_id: string + - name: files + type: list_path_string + required: false + - name: extract_files + type: list_path_string + required: false + load_dataset: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_dataset + inputs: + - name: ep_seed + type: integer + required: false + - name: training_dir + type: string + required: false + - name: testing_dir + type: string + required: false + - name: subsets + type: list_str_null + required: false + - name: image_size + type: image_size + required: false + - name: rescale + type: number + required: false + - name: validation_split + type: num_null + required: false + - name: batch_size + type: integer + required: false + - name: label_mode + type: string + required: false + - name: shuffle + type: boolean + required: false + outputs: + - training: directory_iterator + - validation: directory_iterator + - testing: directory_iterator + + load_model: + plugin: dioptra_custom.fgm_mnist_demo.plugins.load_model + inputs: + - name: model_name + type: string + required: false + - name: model_version + type: integer + required: false + - name: imagenet_preprocessing + type: boolean + required: false + - name: art + type: boolean + required: false + - name: classifier_kwargs + type: kwargs + required: false + outputs: + classifier: classifier + + predict: + plugin: dioptra_custom.fgm_mnist_demo.plugins.predict + inputs: + - classifier: classifier + - dataset: directory_iterator + - name: show_actual + type: boolean + required: false + - name: show_target + type: boolean + required: false + outputs: + - predictions: ndarray + + save_artifacts_and_models: + plugin: dioptra_custom.fgm_mnist_demo.plugins.save_artifacts_and_models + inputs: + - name: artifacts + type: artifact_list + required: false + - name: models + type: model_list + required: false + +graph: + load: + load_artifacts_for_job: + job_id: $job_id + extract_files: [$tar_name] + + dataset: + load_dataset: + ep_seed: $seed + testing_dir: $data_dir + subsets: [testing] + image_size: $image_size + + model: + 
load_model: + model_name: $model_name + model_version: $model_version + + predictions: + predict: + classifier: $model + dataset: $dataset.testing + show_actual: $record_actual_class + show_target: $record_target_class + dependencies: + - model + + save: + save_artifacts_and_models: + artifacts: + - type: dataframe + data_frame: $predictions + file_name: $filename + file_format: $format + file_format_kwargs: + dependencies: + - predictions \ No newline at end of file diff --git a/examples/mnist-classifier-demo/src/train.yml b/examples/mnist-classifier-demo/src/train.yml index 2bd1fd161..e29980da4 100644 --- a/examples/mnist-classifier-demo/src/train.yml +++ b/examples/mnist-classifier-demo/src/train.yml @@ -41,7 +41,7 @@ parameters: testing_dir: /dioptra/data/Mnist/testing image_size: [28, 28, 1] validation_split: 0.2 - batch_size: 32 + batch_size: 50 model_architecture: le_net epochs: 30 register_model_name: "mnist_classifier" @@ -53,7 +53,10 @@ tasks: - name: ep_seed type: integer required: false - - name: data_dir + - name: training_dir + type: string + required: false + - name: testing_dir type: string required: false - name: subsets @@ -125,8 +128,8 @@ tasks: outputs: classifier: classifier - compute_metrics: - plugin: dioptra_custom.fgm_mnist_demo.plugins.compute_metrics + model_metrics: + plugin: dioptra_custom.fgm_mnist_demo.plugins.model_metrics inputs: - classifier: classifier - dataset: directory_iterator @@ -145,7 +148,8 @@ graph: dataset: load_dataset: ep_seed: $seed - data_dir: $training_dir + training_dir: $training_dir + testing_dir: $testing_dir subsets: [training, validation, testing] image_size: $image_size validation_split: $validation_split @@ -189,7 +193,7 @@ graph: - model metrics: - compute_metrics: + model_metrics: classifier: $trained_model dataset: $dataset.testing dependencies: diff --git a/examples/scripts/setup.py b/examples/scripts/setup.py index 523e5e760..e9500dd85 100644 --- a/examples/scripts/setup.py +++ b/examples/scripts/setup.py @@ -232,7 +232,10 @@ def delete_all(client): for d in client.entrypoints.get_all(pageLength=100000)['data']: client.entrypoints.delete_by_id(d['id']) for d in client.jobs.get_all(pageLength=100000)['data']: - client.jobs.delete_by_id(d['id']) + try: + client.jobs.delete_by_id(d['id']) + except: + pass for d in client.models.get_all(pageLength=100000)['data']: client.models.delete_by_id(d['id']) for d in client.plugins.get_all(pageLength=100000)['data']: diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py index fa8831a57..d773c6365 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/artifacts_mlflow.py @@ -167,6 +167,51 @@ def to_format( upload_file_as_artifact(artifact_path=df_artifact_path) +def download_df( + filename: str, + format: str = 'csv', + file_format_kwargs: Optional[Dict[str, Any]] = None, +): + def from_format( + format: str + ) -> Dict[str, Any]: + format_funcs = { + "csv": { + "func": pd.read_csv, + }, + "csv.bz2": { + "func": pd.read_csv, + }, + "csv.gz": { + "func": pd.read_csv, + }, + "csv.xz": { + "func": pd.read_csv, + }, + "feather": { + "func": pd.read_feather, + }, + "json": { + "func": pd.read_json, + }, + "pickle": { + "func": pd.read_pickle, + }, + } + + func: Optional[Dict[str, Any]] = format_funcs.get(format) + + if func is None: + raise UnsupportedDataFrameFileFormatError( + f"Serializing data frames 
+
+        return func
+
+    format_dict: Dict[str, Any] = from_format(format=format)
+    df_from_format_func: Callable[..., Any] = format_dict["func"]
+    df = df_from_format_func(filename, **(file_format_kwargs or {}))
+    return df
+
 @pyplugs.register
 def upload_directory_as_tarball_artifact(
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py
index 8b16d6804..cfc079a38 100644
--- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/data_tensorflow.py
@@ -26,9 +26,13 @@
 
 from typing import Optional, Tuple
+from typing import Any
 
 import structlog
+import pandas as pd
+import numpy as np
 from structlog.stdlib import BoundLogger
+
 from dioptra import pyplugs
 from dioptra.sdk.exceptions import TensorflowDependencyError
 from dioptra.sdk.utilities.decorators import require_package
@@ -128,3 +132,48 @@ def get_n_classes_from_directory_iterator(ds: DirectoryIterator) -> int:
         The number of unique labels in the dataset.
     """
     return len(ds.class_indices)
+
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def predictions_to_df(
+    predictions: np.ndarray,
+    dataset: DirectoryIterator = None,
+    show_actual: bool = False,
+    show_target: bool = False,
+):
+    n_classes = get_n_classes_from_directory_iterator(dataset)
+
+    df = pd.DataFrame(predictions)
+    df.columns = [f'prob_{n}' for n in range(n_classes)]  # note: applicable to classification only
+
+    if show_actual:
+        y_pred = np.argmax(predictions, axis=1)  # 'actual' column = predicted class (argmax)
+        df.insert(0, 'actual', y_pred)
+    if show_target:
+        y_true = dataset.classes  # 'target' column = ground-truth class from the iterator
+        df.insert(0, 'target', y_true)
+
+    df.insert(0, 'id', dataset.filepaths)
+
+    return df
+
+def df_to_predictions(
+    df: pd.DataFrame,
+    dataset: DirectoryIterator = None,
+    n_classes: int = -1,
+):
+    n_classes = get_n_classes_from_directory_iterator(dataset) if dataset is not None else n_classes  # get classes from dataset
+    n_classes = df.columns.str.startswith('prob_').sum() if n_classes < 0 else n_classes  # count classes manually
+
+    if {'actual', 'target'}.issubset(df.columns):
+        y_pred = df['actual'].to_numpy()
+        y_true = df['target'].to_numpy()
+    else:
+        y_pred = np.argmax(df[[f'prob_{n}' for n in range(n_classes)]].to_numpy(), axis=1)
+        y_true = dataset.classes
+
+    # generate one-hot encodings
+    y_pred = np.eye(n_classes)[y_pred]
+    y_true = np.eye(n_classes)[y_true]
+
+    return y_true, y_pred
\ No newline at end of file
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/defenses_image_preprocessing.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/defenses_image_preprocessing.py
index 2ceae26bb..fcd57d0e2 100644
--- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/defenses_image_preprocessing.py
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/defenses_image_preprocessing.py
@@ -17,7 +17,7 @@
 from __future__ import annotations
 
 from pathlib import Path
-from typing import Callable, Dict, List, Optional, Tuple, Union
+from typing import Callable, Dict, List, Optional, Tuple, Union, Any
 
 import mlflow
 import numpy as np
@@ -129,7 +129,6 @@ def create_defended_dataset(
 
     LOGGER.info("Defended image generation complete", defense=def_type)
     _log_distance_metrics(distance_metrics_)
-
     return pd.DataFrame(distance_metrics_)
 
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_performance.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_performance.py
new file mode 100644
index 000000000..c2086166e
--- /dev/null
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/metrics_performance.py
@@ -0,0 +1,296 @@
+# This Software (Dioptra) is being made available as a public service by the
+# National Institute of Standards and Technology (NIST), an Agency of the United
+# States Department of Commerce. This software was developed in part by employees of
+# NIST and in part by NIST contractors. Copyright in portions of this software that
+# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant
+# to Title 17 United States Code Section 105, works of NIST employees are not
+# subject to copyright protection in the United States. However, NIST may hold
+# international copyright in software created by its employees and domestic
+# copyright (or licensing rights) in portions of software that were assigned or
+# licensed to NIST. To the extent that NIST holds copyright in this software, it is
+# being made available under the Creative Commons Attribution 4.0 International
+# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts
+# of the software developed or licensed by NIST.
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+"""A task plugin module for getting functions from a performance metric registry."""
+
+from __future__ import annotations
+
+from typing import Any, Callable, Dict, List, Optional, Tuple
+
+import numpy as np
+import structlog
+from sklearn.metrics import (
+    accuracy_score,
+    f1_score,
+    matthews_corrcoef,
+    precision_score,
+    recall_score,
+    roc_auc_score,
+)
+from structlog.stdlib import BoundLogger
+
+from dioptra import pyplugs
+
+from .metrics_exceptions import UnknownPerformanceMetricError
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+@pyplugs.register
+def get_performance_metric_list(
+    request: List[Dict[str, str]]
+) -> List[Tuple[str, Callable[..., float]]]:
+    """Gets multiple performance metric functions from the registry.
+
+    The following metrics are available in the registry:
+
+    - `accuracy`
+    - `roc_auc`
+    - `categorical_accuracy`
+    - `mcc`
+    - `f1`
+    - `precision`
+    - `recall`
+
+    Args:
+        request: A list of dictionaries with the keys `name` and `func`. The `func`
+            key is used to look up the metric function in the registry and must
+            match one of the metric names listed above. The `name` key is a
+            human-readable label for the metric function.
+
+    Returns:
+        A list of tuples with two elements. The first element of each tuple is the
+        label from the `name` key of `request`, and the second element is the
+        callable metric function.
+    """
+    performance_metrics_list: List[Tuple[str, Callable[..., float]]] = []
+
+    for metric in request:
+        metric_callable: Optional[Callable[..., float]] = (
+            PERFORMANCE_METRICS_REGISTRY.get(metric["func"])
+        )
+
+        if metric_callable is not None:
+            performance_metrics_list.append((metric["name"], metric_callable))
+
+        else:
+            LOGGER.warning(
+                "Performance metric not in registry, skipping...",
+                name=metric["name"],
+                func=metric["func"],
+            )
+
+    return performance_metrics_list
+
+
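+# Illustrative usage (hypothetical values, not part of the demo entrypoints):
+#
+#     request = [{"name": "acc", "func": "accuracy"},
+#                {"name": "weighted_f1", "func": "f1"}]
+#     get_performance_metric_list(request)
+#     # -> [("acc", accuracy), ("weighted_f1", f1)]
+#
+# Entries whose "func" key is not in PERFORMANCE_METRICS_REGISTRY are logged
+# and skipped rather than raising an error.
+
+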
+@pyplugs.register
+def get_performance_metric(func: str) -> Callable[..., float]:
+    """Gets a performance metric function from the registry.
+
+    The following metrics are available in the registry:
+
+    - `accuracy`
+    - `roc_auc`
+    - `categorical_accuracy`
+    - `mcc`
+    - `f1`
+    - `precision`
+    - `recall`
+
+    Args:
+        func: A string that identifies the performance metric to return from the
+            registry. The string must match one of the names of the metrics in the
+            registry.
+
+    Returns:
+        A callable performance metric function.
+    """
+    metric_callable: Optional[Callable[..., float]] = PERFORMANCE_METRICS_REGISTRY.get(
+        func
+    )
+
+    if metric_callable is None:
+        LOGGER.error(
+            "Performance metric not in registry",
+            func=func,
+        )
+        raise UnknownPerformanceMetricError(
+            f"Could not find any performance metric named {func!r} in the metrics "
+            "plugin collection. Check spelling and try again."
+        )
+
+    return metric_callable
+
+
+def accuracy(y_true, y_pred, **kwargs) -> float:
+    """Calculates the accuracy score.
+
+    Args:
+        y_true: A 1d array-like, or label indicator array containing the ground
+            truth labels.
+        y_pred: A 1d array-like, or label indicator array containing the predicted
+            labels, as returned by a classifier.
+
+    Returns:
+        The fraction of correctly classified samples.
+
+    See Also:
+        - :py:func:`sklearn.metrics.accuracy_score`
+    """
+    metric: float = accuracy_score(y_true=y_true, y_pred=y_pred, **kwargs)
+    return metric
+
+
+def roc_auc(y_true, y_pred, **kwargs) -> float:
+    """Calculates the Area Under the Receiver Operating Characteristic Curve (ROC AUC).
+
+    Args:
+        y_true: An array-like of shape `(n_samples,)` or `(n_samples, n_classes)`
+            containing the ground truth labels.
+        y_pred: An array-like of shape `(n_samples,)` or `(n_samples, n_classes)`
+            containing the predicted labels, as returned by a classifier.
+
+    Returns:
+        The ROC AUC.
+
+    See Also:
+        - :py:func:`sklearn.metrics.roc_auc_score`
+    """
+    metric: float = roc_auc_score(y_true=y_true, y_score=y_pred, **kwargs)
+    return metric
+
+
+def categorical_accuracy(y_true, y_pred) -> float:
+    """Calculates the categorical accuracy.
+
+    This function is a port of the Keras metric
+    :py:class:`~tf.keras.metrics.CategoricalAccuracy`.
+
+    Args:
+        y_true: A 1d array-like, or label indicator array containing the ground
+            truth labels.
+        y_pred: A 1d array-like, or label indicator array containing the predicted
+            labels, as returned by a classifier.
+
+    Returns:
+        The fraction of correctly classified samples.
+    """
+    if len(y_true.shape) > 1 and len(y_pred.shape) > 1:
+        label_comparison: np.ndarray = np.argmax(y_true, axis=-1) == np.argmax(
+            y_pred, axis=-1
+        )
+
+    else:
+        label_comparison = y_true == y_pred
+
+    metric: float = float(np.mean(label_comparison))
+
+    return metric
+
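+
+# Worked example for categorical_accuracy (values are illustrative): with
+#     y_true = np.array([[1, 0], [0, 1]])
+#     y_pred = np.array([[0.9, 0.1], [0.2, 0.8]])
+# both inputs are 2d, so each row is reduced with argmax to [0, 1] and [0, 1],
+# and the returned accuracy is 1.0.
+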
+
+def mcc(y_true, y_pred, **kwargs) -> float:
+    """Calculates the Matthews correlation coefficient.
+
+    Args:
+        y_true: A 1d array containing the ground truth labels.
+        y_pred: A 1d array containing the predicted labels, as returned by a
+            classifier.
+
+    Returns:
+        The Matthews correlation coefficient (`+1` represents a perfect prediction,
+        `0` an average random prediction, and `-1` an inverse prediction).
+
+    See Also:
+        - :py:func:`sklearn.metrics.matthews_corrcoef`
+    """
+    metric: float = matthews_corrcoef(y_true=y_true, y_pred=y_pred, **kwargs)
+    return metric
+
+
+def f1(y_true, y_pred, **kwargs) -> float:
+    """Calculates the F1 score.
+
+    Args:
+        y_true: A 1d array-like, or label indicator array containing the ground
+            truth labels.
+        y_pred: A 1d array-like, or label indicator array containing the predicted
+            labels, as returned by a classifier.
+
+    Returns:
+        The F1 score of the positive class in binary classification or the weighted
+        average of the F1 scores of each class for the multiclass task.
+
+    See Also:
+        - :py:func:`sklearn.metrics.f1_score`
+    """
+    metric: float = f1_score(y_true=y_true, y_pred=y_pred, **kwargs)
+    return metric
+
+
+def precision(y_true, y_pred, **kwargs) -> float:
+    """Calculates the precision score.
+
+    Args:
+        y_true: A 1d array-like, or label indicator array containing the ground
+            truth labels.
+        y_pred: A 1d array-like, or label indicator array containing the predicted
+            labels, as returned by a classifier.
+
+    Returns:
+        The precision of the positive class in binary classification or the
+        weighted average of the precision of each class for the multiclass task.
+
+    See Also:
+        - :py:func:`sklearn.metrics.precision_score`
+    """
+    metric: float = precision_score(y_true=y_true, y_pred=y_pred, **kwargs)
+    return metric
+
+
+def recall(y_true, y_pred, **kwargs) -> float:
+    """Calculates the recall score.
+
+    Args:
+        y_true: A 1d array-like, or label indicator array containing the ground
+            truth labels.
+        y_pred: A 1d array-like, or label indicator array containing the predicted
+            labels, as returned by a classifier.
+
+    Returns:
+        The recall of the positive class in binary classification or the weighted
+        average of the recall of each class for the multiclass task.
+
+    See Also:
+        - :py:func:`sklearn.metrics.recall_score`
+    """
+    metric: float = recall_score(y_true=y_true, y_pred=y_pred, **kwargs)
+    return metric
+
+
+PERFORMANCE_METRICS_REGISTRY: Dict[str, Callable[..., Any]] = dict(
+    accuracy=accuracy,
+    roc_auc=roc_auc,
+    categorical_accuracy=categorical_accuracy,
+    mcc=mcc,
+    f1=f1,
+    precision=precision,
+    recall=recall,
+)
+
+@pyplugs.register
+def evaluate_metrics_generic(y_true, y_pred, metrics, func_kwargs) -> Dict[str, float]:
+    """Evaluates each `(name, func)` pair in `metrics` against the given labels."""
+    names = []
+    result = []
+    for metric in metrics:
+        name = metric[0]
+        func = metric[1]
+
+        # per-metric keyword arguments are keyed by the metric's display name
+        extra_kwargs = func_kwargs.get(name, {})
+
+        names += [name]
+        metric_output = func(y_true.copy(), y_pred.copy(), **extra_kwargs)
+        result += [metric_output]
+
+    return dict(zip(names, result))
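+
+# Sketch of the intended call pattern (names and the result value are
+# illustrative, not taken from a real run):
+#
+#     metrics = get_performance_metric_list(
+#         [{"name": "acc", "func": "categorical_accuracy"}]
+#     )
+#     evaluate_metrics_generic(y_true, y_pred, metrics, func_kwargs={})
+#     # -> {"acc": 0.98}
+#
+# where y_true and y_pred are one-hot arrays such as those returned by
+# data_tensorflow.df_to_predictions.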
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py
index 6c1ba4e6c..e57ab1257 100644
--- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py
@@ -30,7 +30,7 @@
 )
 
 from dioptra import pyplugs
-from .tensorflow import get_optimizer, get_model_callbacks, get_performance_metrics, evaluate_metrics_tensorflow
+from .tensorflow import get_optimizer, get_model_callbacks, get_performance_metrics, evaluate_metrics_tensorflow, predict_tensorflow
 from .estimators_keras_classifiers import init_classifier
 from .registry_art import load_wrapped_tensorflow_keras_classifier
 from .registry_mlflow import load_tensorflow_keras_classifier
@@ -38,7 +38,7 @@
 from .random_sample import draw_random_integer
 from .backend_configs_tensorflow import init_tensorflow
 from .tracking_mlflow import log_parameters, log_tensorflow_keras_estimator, log_metrics
-from .data_tensorflow import get_n_classes_from_directory_iterator, create_image_dataset
+from .data_tensorflow import get_n_classes_from_directory_iterator, create_image_dataset, predictions_to_df, df_to_predictions
 from .estimators_methods import fit
 from .mlflow import add_model_to_registry
 from .artifacts_restapi import get_uri_for_model, get_uris_for_job, get_uris_for_artifacts
@@ -48,13 +48,15 @@
 from .artifacts_mlflow import upload_directory_as_tarball_artifact, upload_data_frame_artifact, download_all_artifacts
 from .defenses_image_preprocessing import create_defended_dataset
 from .attacks_patch import create_adversarial_patches, create_adversarial_patch_dataset
+from .metrics_performance import get_performance_metric_list, evaluate_metrics_generic
 
 LOGGER: BoundLogger = structlog.stdlib.get_logger()
 
 @pyplugs.register
 def load_dataset(
     ep_seed: int = 10145783023,
-    data_dir: str = "/dioptra/data/Mnist/testing",
+    training_dir: str = "/dioptra/data/Mnist/training",
+    testing_dir: str = "/dioptra/data/Mnist/testing",
     subsets: List[str] = ['testing'],
     image_size: Tuple[int, int, int] = [28, 28, 1],
     rescale: float = 1.0 / 255,
@@ -72,7 +74,7 @@ def load_dataset(
         'tensorflow_global_seed':global_seed, 'dataset_seed':dataset_seed})
     training_dataset = None if "training" not in subsets else create_image_dataset(
-        data_dir=data_dir,
+        data_dir=training_dir,
         subset="training",
         image_size=image_size,
         seed=dataset_seed,
@@ -84,7 +86,7 @@ def load_dataset(
     )
 
     validation_dataset = None if "validation" not in subsets else create_image_dataset(
-        data_dir=data_dir,
+        data_dir=training_dir,
         subset="validation",
         image_size=image_size,
         seed=dataset_seed,
@@ -95,7 +97,7 @@ def load_dataset(
         shuffle=shuffle
     )
     testing_dataset = None if "testing" not in subsets else create_image_dataset(
-        data_dir=data_dir,
+        data_dir=testing_dir,
         subset=None,
         image_size=image_size,
         seed=dataset_seed,
@@ -179,13 +181,21 @@ def save_artifacts_and_models(
     )
 @pyplugs.register
 def load_artifacts_for_job(
-    job_id: str, extract_files: List[str|Path] = None
+    job_id: str,
+    files: List[str|Path] = None,
+    extract_files: List[str|Path] = None
 ):
+    files = [] if files is None else files
     extract_files = [] if extract_files is None else extract_files
+    files += extract_files  # need to download them to be able to extract
+
     uris = get_uris_for_job(job_id)
-    paths = download_all_artifacts(uris, extract_files)
+    paths = download_all_artifacts(uris, files)
     for extract in paths:
-        extract_tarfile(extract)
+        for ef in extract_files:  # only extract paths matching a requested archive
+            if str(extract).endswith(str(ef)):
+                extract_tarfile(extract)
+
+    return paths
 
 @pyplugs.register
 def load_artifacts(
@@ -289,12 +299,26 @@ def augment_patch(
         scale_max=scale_max
     )
 @pyplugs.register
-def compute_metrics(
+def model_metrics(
     classifier: Any,
     dataset: Any
 ):
     metrics = evaluate_metrics_tensorflow(classifier, dataset)
     log_metrics(metrics)
+    return metrics
+
+@pyplugs.register
+def prediction_metrics(
+    y_true: np.ndarray,
+    y_pred: np.ndarray,
+    metrics_list: List[Dict[str, str]],
+    func_kwargs: Dict[str, Dict[str, Any]] = None
+):
+    func_kwargs = {} if func_kwargs is None else func_kwargs
+    callable_list = get_performance_metric_list(metrics_list)
+    metrics = evaluate_metrics_generic(y_true, y_pred, callable_list, func_kwargs)
+    log_metrics(metrics)
+    return metrics
 
 @pyplugs.register
 def augment_data(
@@ -320,5 +344,38 @@ def augment_data(
     return defended_dataset
 
 @pyplugs.register
-def predict():
-    pass
+def predict(
+    classifier: Any,
+    dataset: Any,
+    show_actual: bool = False,
+    show_target: bool = False,
+):
+    predictions = predict_tensorflow(classifier, dataset)
+    df = predictions_to_df(
+        predictions,
+        dataset,
+        show_actual=show_actual,
+        show_target=show_target)
+    return df
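+
+# The DataFrame returned by `predict` has one row per input file. For MNIST the
+# columns are (illustrative; `target` and `actual` appear only when requested):
+#
+#     id, target, actual, prob_0, ..., prob_9
+#
+# where `actual` is the argmax prediction and `target` is the ground-truth class,
+# as constructed by data_tensorflow.predictions_to_df.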
+
+@pyplugs.register
+def load_predictions(
+    paths: List[str],
+    filename: str,
+    format: str = 'csv',
+    dataset: DirectoryIterator = None,
+    n_classes: int = -1,
+):
+    loc = None
+    for m in paths:  # locate the downloaded predictions file among the artifacts
+        if m.endswith(filename):
+            loc = m
+    if loc is None:
+        raise FileNotFoundError(f"{filename} not found among downloaded artifacts")
+    if format == 'csv':
+        df = pd.read_csv(loc)
+    elif format == 'json':
+        df = pd.read_json(loc)
+    else:
+        raise ValueError(f"Unsupported predictions file format: {format}")
+    y_true, y_pred = df_to_predictions(df, dataset, n_classes)
+    return y_true, y_pred
diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py
index 52dfeea26..e502f8729 100644
--- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py
+++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/tensorflow.py
@@ -41,13 +41,17 @@
     package="tensorflow",
 )
 
-
 @pyplugs.register
 @require_package("tensorflow", exc_type=TensorflowDependencyError)
 def evaluate_metrics_tensorflow(classifier, dataset) -> Dict[str, float]:
     result = classifier.evaluate(dataset, verbose=0, return_dict=True)
     return result
 
+@pyplugs.register
+@require_package("tensorflow", exc_type=TensorflowDependencyError)
+def predict_tensorflow(classifier, dataset):  # returns the raw predict() output
+    result = classifier.predict(dataset, verbose=0)
+    return result
 
 @pyplugs.register
 @require_package("tensorflow", exc_type=TensorflowDependencyError)

From c18b24304c5e3d470066ecc3d2747daa8ebe360a Mon Sep 17 00:00:00 2001
From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com>
Date: Wed, 16 Oct 2024 15:33:44 -0400
Subject: [PATCH 11/18] examples: remove unused entrypoint params and save
 performance metrics to csv

---
 .../mnist-classifier-demo/src/defense.yml     |  5 ----
 .../mnist-classifier-demo/src/evaluate.yml    |  1 -
 .../mnist-classifier-demo/src/metrics.yml     | 24 +++++++++++++++++--
 .../mnist-classifier-demo/src/predict.yml     |  1 -
 .../dioptra_custom/fgm_mnist_demo/plugins.py  |  4 ++--
 5 files changed, 24 insertions(+), 11 deletions(-)

diff --git a/examples/mnist-classifier-demo/src/defense.yml b/examples/mnist-classifier-demo/src/defense.yml
index 508dca6d3..a155907c6 100644
--- a/examples/mnist-classifier-demo/src/defense.yml
+++ b/examples/mnist-classifier-demo/src/defense.yml
@@ -40,7 +40,6 @@ types:
     list: name_parameters
 
 parameters:
-  data_dir: /dioptra/data/Mnist/testing
   image_size: [28, 28, 1]
   job_id:
   adv_tar_name: testing_adversarial_fgm.tar.gz
@@ -52,10 +51,6 @@ parameters:
   model_version: -1
   clip_values: [0, 1]
   batch_size: 50
-  eps: 0.3
-  eps_step: 0.1
-  minimal: false
-  norm: "inf"
   seed: -1
   def_type: spatial_smoothing
   defense_kwargs: {}
diff --git a/examples/mnist-classifier-demo/src/evaluate.yml b/examples/mnist-classifier-demo/src/evaluate.yml
index 81665aba8..402b04106 100644
--- a/examples/mnist-classifier-demo/src/evaluate.yml
+++ b/examples/mnist-classifier-demo/src/evaluate.yml
@@ -18,7 +18,6 @@ types:
     tuple: [integer, integer, integer]
 
 parameters:
-  run_id: ""
   image_size: [28, 28, 1]
   model_name: mnist_classifier
   model_version: -1
diff --git a/examples/mnist-classifier-demo/src/metrics.yml b/examples/mnist-classifier-demo/src/metrics.yml
index c87971de2..79569c369 100644
--- a/examples/mnist-classifier-demo/src/metrics.yml
+++ b/examples/mnist-classifier-demo/src/metrics.yml
@@ -33,7 +33,6 @@ types:
   metric_output:
     mapping: [string, any]
 
 parameters:
-  run_id: ""
   image_size: [28, 28, 1]
   data_dir: /dioptra/data/Mnist/testing
   job_id:
@@ -125,6 +124,16 @@ tasks:
     outputs:
       - metric_results: metric_output
 
+  save_artifacts_and_models:
+    plugin: dioptra_custom.fgm_mnist_demo.plugins.save_artifacts_and_models
+    inputs:
+      - name: artifacts
+        type: artifact_list
+        required: false
+      - name: models
+        type: model_list
+        required: false
+
 graph:
   predictions:
     load_artifacts_for_job:
@@ -156,4 +165,15 @@ ... - 
name: roc_auc func: roc_auc dependencies: - - format_predictions \ No newline at end of file + - format_predictions + + save: + save_artifacts_and_models: + artifacts: + - type: dataframe + data_frame: $metrics + file_name: $metrics_filename + file_format: $metrics_format + file_format_kwargs: + dependencies: + - metrics \ No newline at end of file diff --git a/examples/mnist-classifier-demo/src/predict.yml b/examples/mnist-classifier-demo/src/predict.yml index 990bc3558..76905aa32 100644 --- a/examples/mnist-classifier-demo/src/predict.yml +++ b/examples/mnist-classifier-demo/src/predict.yml @@ -23,7 +23,6 @@ types: tuple: [integer, integer, integer] ndarray: parameters: - run_id: "" image_size: [28, 28, 1] model_name: mnist_classifier model_version: -1 diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py index e57ab1257..4b7b97405 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py @@ -318,7 +318,7 @@ def prediction_metrics( callable_list = get_performance_metric_list(metrics_list) metrics = evaluate_metrics_generic(y_true, y_pred, callable_list, func_kwargs) log_metrics(metrics) - return metrics + return pd.DataFrame(metrics, index=[1]) @pyplugs.register def augment_data( @@ -326,7 +326,7 @@ def augment_data( def_data_dir: Union[str, Path], image_size: Tuple[int, int, int], distance_metrics: List[Dict[str, str]], - batch_size: int = 32, + batch_size: int = 50, def_type: str = "spatial_smoothing", defense_kwargs: Optional[Dict[str, Any]] = None, ): From fa9d71439ecd90e7e282fec98601f1b33d53f0e9 Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Wed, 16 Oct 2024 15:49:33 -0400 Subject: [PATCH 12/18] examples: remove unused variable --- examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py index 4b7b97405..5cd1eb6f7 100644 --- a/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py +++ b/examples/task-plugins/dioptra_custom/fgm_mnist_demo/plugins.py @@ -152,7 +152,7 @@ def train( fit_kwargs = {} if fit_kwargs is None else fit_kwargs callbacks = get_model_callbacks(callbacks_list) fit_kwargs['callbacks'] = callbacks - trained_model = fit(estimator=estimator, x=x, y=y, fit_kwargs=fit_kwargs) + fit(estimator=estimator, x=x, y=y, fit_kwargs=fit_kwargs) return estimator @pyplugs.register From 97067385288db2d0b430c3475f0c565babadd9ff Mon Sep 17 00:00:00 2001 From: jtsextonMITRE <45762017+jtsextonMITRE@users.noreply.github.com> Date: Thu, 19 Dec 2024 14:17:31 -0500 Subject: [PATCH 13/18] examples: pull changes from dev to get access to new client --- .github/workflows/pip-compile.yml | 17 +- .gitignore | 1 - .../scripts/init-frontend.sh | 7 +- ...64-py3.11-mlflow-tracking-requirements.txt | 158 +- ...-amd64-py3.11-pytorch-cpu-requirements.txt | 253 +- ...-amd64-py3.11-pytorch-gpu-requirements.txt | 253 +- ...inux-amd64-py3.11-restapi-requirements.txt | 130 +- ...64-py3.11-tensorflow2-cpu-requirements.txt | 257 +- ...64-py3.11-tensorflow2-gpu-requirements.txt | 257 +- ...64-py3.11-mlflow-tracking-requirements.txt | 158 +- ...-arm64-py3.11-pytorch-cpu-requirements.txt | 251 +- ...inux-arm64-py3.11-restapi-requirements.txt | 130 +- 
...64-py3.11-tensorflow2-cpu-requirements.txt | 257 +- docker/shellscripts/healthcheck-worker.m4 | 9 +- pyproject.toml | 2 + ...-amd64-py3.11-requirements-dev-pytorch.txt | 380 +- ...d64-py3.11-requirements-dev-tensorflow.txt | 384 +- .../linux-amd64-py3.11-requirements-dev.txt | 376 +- ...-arm64-py3.11-requirements-dev-pytorch.txt | 378 +- ...m64-py3.11-requirements-dev-tensorflow.txt | 384 +- .../linux-arm64-py3.11-requirements-dev.txt | 376 +- ...-amd64-py3.11-requirements-dev-pytorch.txt | 378 +- ...d64-py3.11-requirements-dev-tensorflow.txt | 384 +- .../macos-amd64-py3.11-requirements-dev.txt | 376 +- ...-arm64-py3.11-requirements-dev-pytorch.txt | 376 +- ...m64-py3.11-requirements-dev-tensorflow.txt | 382 +- .../macos-arm64-py3.11-requirements-dev.txt | 374 +- ...-amd64-py3.11-requirements-dev-pytorch.txt | 382 +- ...d64-py3.11-requirements-dev-tensorflow.txt | 388 +- .../win-amd64-py3.11-requirements-dev.txt | 380 +- src/dioptra/client/__init__.py | 14 +- src/dioptra/client/_client.py | 746 ---- src/dioptra/client/artifacts.py | 165 + .../v1/tags/errors.py => client/auth.py} | 53 +- src/dioptra/client/base.py | 496 +++ src/dioptra/client/client.py | 218 + src/dioptra/client/drafts.py | 404 ++ src/dioptra/client/entrypoints.py | 571 +++ src/dioptra/client/experiments.py | 704 +++ src/dioptra/client/groups.py | 81 + src/dioptra/client/jobs.py | 205 + src/dioptra/client/models.py | 434 ++ src/dioptra/client/plugin_parameter_types.py | 316 ++ src/dioptra/client/plugins.py | 637 +++ src/dioptra/client/queues.py | 286 ++ src/dioptra/client/sessions.py | 718 +++ src/dioptra/client/snapshots.py | 88 + src/dioptra/client/tags.py | 268 ++ src/dioptra/client/users.py | 178 + src/dioptra/client/workflows.py | 88 + src/dioptra/restapi/db/db.py | 7 +- src/dioptra/restapi/db/models/utils.py | 160 + src/dioptra/restapi/errors.py | 433 +- src/dioptra/restapi/utils.py | 24 + src/dioptra/restapi/v1/artifacts/__init__.py | 3 - src/dioptra/restapi/v1/artifacts/service.py | 40 +- .../restapi/v1/entrypoints/__init__.py | 3 - src/dioptra/restapi/v1/entrypoints/errors.py | 77 - src/dioptra/restapi/v1/entrypoints/service.py | 147 +- .../restapi/v1/experiments/__init__.py | 3 - src/dioptra/restapi/v1/experiments/errors.py | 53 - src/dioptra/restapi/v1/experiments/service.py | 89 +- src/dioptra/restapi/v1/groups/__init__.py | 3 - src/dioptra/restapi/v1/groups/errors.py | 41 - src/dioptra/restapi/v1/groups/service.py | 39 +- src/dioptra/restapi/v1/jobs/__init__.py | 3 - src/dioptra/restapi/v1/jobs/errors.py | 105 - src/dioptra/restapi/v1/jobs/service.py | 81 +- src/dioptra/restapi/v1/models/__init__.py | 3 - src/dioptra/restapi/v1/models/errors.py | 65 - src/dioptra/restapi/v1/models/service.py | 81 +- .../v1/plugin_parameter_types/__init__.py | 3 - .../v1/plugin_parameter_types/errors.py | 95 - .../v1/plugin_parameter_types/service.py | 92 +- src/dioptra/restapi/v1/plugins/__init__.py | 4 - src/dioptra/restapi/v1/plugins/errors.py | 121 - src/dioptra/restapi/v1/plugins/service.py | 198 +- src/dioptra/restapi/v1/queues/__init__.py | 3 +- src/dioptra/restapi/v1/queues/errors.py | 63 - src/dioptra/restapi/v1/queues/service.py | 64 +- .../restapi/v1/shared/drafts/service.py | 19 +- .../restapi/v1/shared/snapshots/service.py | 21 +- src/dioptra/restapi/v1/shared/tags/service.py | 19 +- src/dioptra/restapi/v1/tags/__init__.py | 3 - src/dioptra/restapi/v1/tags/service.py | 63 +- src/dioptra/restapi/v1/users/__init__.py | 3 - src/dioptra/restapi/v1/users/errors.py | 115 - src/dioptra/restapi/v1/users/service.py | 72 
+- src/dioptra/restapi/v1/workflows/errors.py | 41 - .../v1/workflows/lib/run_dioptra_job.py.tmpl | 20 +- src/dioptra/restapi/v1/workflows/lib/views.py | 21 +- src/dioptra/restapi/v1/workflows/service.py | 3 +- src/dioptra/rq/tasks/run_v1_dioptra_job.py | 142 +- src/frontend/package-lock.json | 3927 +++++++++++++++++ src/frontend/package.json | 3 +- src/frontend/src/components/CodeEditor.vue | 129 +- .../src/components/TableComponent.vue | 69 +- .../src/dialogs/AssignPluginsDialog.vue | 2 +- src/frontend/src/dialogs/AssignTagsDialog.vue | 2 +- src/frontend/src/dialogs/DialogComponent.vue | 4 +- src/frontend/src/dialogs/LeaveFormDialog.vue | 4 +- .../src/dialogs/ReturnExperimentsDialog.vue | 32 - .../src/dialogs/ReturnToFormDialog.vue | 26 + src/frontend/src/services/dataApi.ts | 43 +- src/frontend/src/stores/DataStore.ts | 108 - src/frontend/src/stores/LoginStore.ts | 7 +- src/frontend/src/views/AllJobsView.vue | 1 + src/frontend/src/views/ArtifactsView.vue | 2 +- src/frontend/src/views/CreateEntryPoint.vue | 331 +- src/frontend/src/views/CreateExperiment.vue | 92 +- src/frontend/src/views/CreateJob.vue | 103 +- src/frontend/src/views/CreatePluginFile.vue | 161 +- src/frontend/src/views/EntryPointsView.vue | 3 +- src/frontend/src/views/ExperimentsView.vue | 1 + src/frontend/src/views/GroupsAdminView.vue | 5 +- src/frontend/src/views/GroupsView.vue | 1 + src/frontend/src/views/JobsView.vue | 4 +- src/frontend/src/views/ModelsView.vue | 2 + src/frontend/src/views/PluginFiles.vue | 2 +- src/frontend/src/views/PluginParamsView.vue | 2 +- src/frontend/src/views/PluginsView.vue | 1 + src/frontend/src/views/QueuesView.vue | 1 + src/frontend/src/views/TagsView.vue | 1 + tests/unit/restapi/conftest.py | 8 + tests/unit/restapi/lib/__init__.py | 4 +- tests/unit/restapi/lib/actions.py | 2 +- tests/unit/restapi/lib/asserts.py | 304 +- tests/unit/restapi/lib/client.py | 396 ++ tests/unit/restapi/lib/routines.py | 198 + tests/unit/restapi/test_depth_limited_repr.py | 147 + .../unit/restapi/test_utils.py | 71 +- tests/unit/restapi/v1/conftest.py | 4 +- tests/unit/restapi/v1/test_artifact.py | 118 +- tests/unit/restapi/v1/test_entrypoint.py | 823 ++-- tests/unit/restapi/v1/test_experiment.py | 674 ++- tests/unit/restapi/v1/test_group.py | 62 +- tests/unit/restapi/v1/test_job.py | 316 +- tests/unit/restapi/v1/test_model.py | 555 +-- tests/unit/restapi/v1/test_plugin.py | 980 ++-- .../restapi/v1/test_plugin_parameter_type.py | 588 +-- tests/unit/restapi/v1/test_queue.py | 485 +- tests/unit/restapi/v1/test_tag.py | 216 +- tests/unit/restapi/v1/test_user.py | 353 +- 143 files changed, 18792 insertions(+), 9734 deletions(-) delete mode 100644 src/dioptra/client/_client.py create mode 100644 src/dioptra/client/artifacts.py rename src/dioptra/{restapi/v1/tags/errors.py => client/auth.py} (51%) create mode 100644 src/dioptra/client/base.py create mode 100644 src/dioptra/client/client.py create mode 100644 src/dioptra/client/drafts.py create mode 100644 src/dioptra/client/entrypoints.py create mode 100644 src/dioptra/client/experiments.py create mode 100644 src/dioptra/client/groups.py create mode 100644 src/dioptra/client/jobs.py create mode 100644 src/dioptra/client/models.py create mode 100644 src/dioptra/client/plugin_parameter_types.py create mode 100644 src/dioptra/client/plugins.py create mode 100644 src/dioptra/client/queues.py create mode 100644 src/dioptra/client/sessions.py create mode 100644 src/dioptra/client/snapshots.py create mode 100644 src/dioptra/client/tags.py create mode 100644 
src/dioptra/client/users.py create mode 100644 src/dioptra/client/workflows.py create mode 100644 src/dioptra/restapi/db/models/utils.py delete mode 100644 src/dioptra/restapi/v1/entrypoints/errors.py delete mode 100644 src/dioptra/restapi/v1/experiments/errors.py delete mode 100644 src/dioptra/restapi/v1/groups/errors.py delete mode 100644 src/dioptra/restapi/v1/jobs/errors.py delete mode 100644 src/dioptra/restapi/v1/models/errors.py delete mode 100644 src/dioptra/restapi/v1/plugin_parameter_types/errors.py delete mode 100644 src/dioptra/restapi/v1/plugins/errors.py delete mode 100644 src/dioptra/restapi/v1/queues/errors.py delete mode 100644 src/dioptra/restapi/v1/users/errors.py delete mode 100644 src/dioptra/restapi/v1/workflows/errors.py create mode 100644 src/frontend/package-lock.json delete mode 100644 src/frontend/src/dialogs/ReturnExperimentsDialog.vue create mode 100644 src/frontend/src/dialogs/ReturnToFormDialog.vue delete mode 100644 src/frontend/src/stores/DataStore.ts create mode 100644 tests/unit/restapi/lib/client.py create mode 100644 tests/unit/restapi/lib/routines.py create mode 100644 tests/unit/restapi/test_depth_limited_repr.py rename src/dioptra/restapi/v1/artifacts/errors.py => tests/unit/restapi/test_utils.py (51%) diff --git a/.github/workflows/pip-compile.yml b/.github/workflows/pip-compile.yml index 03da8b6b9..ce243bf70 100644 --- a/.github/workflows/pip-compile.yml +++ b/.github/workflows/pip-compile.yml @@ -222,17 +222,24 @@ jobs: - name: install dos2unix tool run: sudo apt install -y dos2unix - - name: download all the compiled requirements files + - name: download all the compiled requirements files - x86-64 uses: actions/download-artifact@v4 with: - path: artifacts + pattern: py* + merge-multiple: true + + - name: download all the compiled requirements files - aarch64 + uses: actions/download-artifact@v4 + with: + pattern: requirements-* + merge-multiple: true - name: standardize and consolidate files into venvs directory run: | - find artifacts -type f -name "*requirements*.txt" -exec chmod 0644 {} \; - find artifacts -type f -name "*requirements*.txt" -exec dos2unix {} \; + find . -type f -name "*requirements*.txt" -exec chmod 0644 {} \; + find . -type f -name "*requirements*.txt" -exec dos2unix {} \; mkdir venvs - mv artifacts/**/*.txt venvs + mv *.txt venvs - name: archive the consolidated requirements files uses: actions/upload-artifact@v4 diff --git a/.gitignore b/.gitignore index 859ea84c4..73c06ab91 100644 --- a/.gitignore +++ b/.gitignore @@ -398,7 +398,6 @@ vignettes/*.pdf # Javascript node_modules/ -package-lock.json # --------------------------- # BEGIN Whitelisted Files diff --git a/cookiecutter-templates/cookiecutter-dioptra-deployment/{{cookiecutter.__project_slug}}/scripts/init-frontend.sh b/cookiecutter-templates/cookiecutter-dioptra-deployment/{{cookiecutter.__project_slug}}/scripts/init-frontend.sh index 1a4e52023..cf0548169 100755 --- a/cookiecutter-templates/cookiecutter-dioptra-deployment/{{cookiecutter.__project_slug}}/scripts/init-frontend.sh +++ b/cookiecutter-templates/cookiecutter-dioptra-deployment/{{cookiecutter.__project_slug}}/scripts/init-frontend.sh @@ -289,6 +289,7 @@ prepare_build_dir() { "public" "index.html" "package.json" + "package-lock.json" "tsconfig.json" "tsconfig.app.json" "tsconfig.node.json" @@ -351,10 +352,10 @@ copy_dist_to_output() { compile_vue_js_frontend() { cd "${BUILD_DIR}" - log_info "Installing node packages using npm install" + log_info "Installing node packages using npm ci" - if ! 
npm install; then - log_error "Installing node modules using npm install failed, exiting..." + if ! npm ci; then + log_error "Installing node modules using npm ci failed, exiting..." exit 1 fi diff --git a/docker/requirements/linux-amd64-py3.11-mlflow-tracking-requirements.txt b/docker/requirements/linux-amd64-py3.11-mlflow-tracking-requirements.txt index 8c305893f..27c4b8035 100644 --- a/docker/requirements/linux-amd64-py3.11-mlflow-tracking-requirements.txt +++ b/docker/requirements/linux-amd64-py3.11-mlflow-tracking-requirements.txt @@ -4,65 +4,69 @@ # # pip-compile --output-file=venvs/linux-amd64-py3.11-mlflow-tracking-requirements.txt docker/pip-tools/mlflow-tracking-requirements.in # -alembic==1.13.2 +alembic==1.14.0 # via mlflow -aniso8601==9.0.1 - # via graphene -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via -r docker/pip-tools/mlflow-tracking-requirements.in -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via # flask - # mlflow -cloudpickle==3.0.0 - # via mlflow -contourpy==1.2.1 + # mlflow-skinny +cloudpickle==3.1.0 + # via mlflow-skinny +contourpy==1.3.1 # via matplotlib cycler==0.12.1 # via matplotlib -deprecated==1.2.14 - # via opentelemetry-api +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions docker==7.1.0 # via mlflow -entrypoints==0.4 - # via mlflow -flask==3.0.3 +flask==3.1.0 # via mlflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -idna==3.7 +idna==3.10 # via requests -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via - # mlflow + # mlflow-skinny # opentelemetry-api itsdangerous==2.2.0 # via flask @@ -76,112 +80,118 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib -mako==1.3.5 +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via mlflow -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -matplotlib==3.9.1 +matplotlib==3.9.2 # via mlflow -mlflow==2.14.2 +mlflow==2.18.0 # via -r docker/pip-tools/mlflow-tracking-requirements.in -numpy==1.26.4 +mlflow-skinny==2.18.0 + # via mlflow +numpy==2.1.3 # via # contourpy # matplotlib # mlflow # pandas - # pyarrow # scikit-learn # scipy -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -packaging==24.1 +packaging==24.2 # via # gunicorn # matplotlib - # mlflow -pandas==2.2.2 + # mlflow-skinny +pandas==2.2.3 # via mlflow -pillow==10.4.0 +pillow==11.0.0 # via matplotlib -protobuf==4.25.3 - # via mlflow -psycopg2-binary==2.9.9 +protobuf==5.28.3 + # via mlflow-skinny 
+psycopg2-binary==2.9.10 # via -r docker/pip-tools/mlflow-tracking-requirements.in -pyarrow==15.0.2 +pyarrow==18.1.0 # via mlflow -pyparsing==3.1.2 +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth +pyparsing==3.2.0 # via matplotlib python-dateutil==2.9.0.post0 # via # botocore + # graphene # matplotlib # pandas -pytz==2024.1 - # via - # mlflow - # pandas -pyyaml==6.0.1 - # via mlflow -querystring-parser==1.2.4 - # via mlflow +pytz==2024.2 + # via pandas +pyyaml==6.0.2 + # via mlflow-skinny requests==2.32.3 # via + # databricks-sdk # docker - # mlflow -s3transfer==0.10.2 + # mlflow-skinny +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via mlflow -scipy==1.14.0 +scipy==1.14.1 # via # mlflow # scikit-learn -simplejson==3.19.2 +simplejson==3.19.3 # via -r docker/pip-tools/mlflow-tracking-requirements.in six==1.16.0 - # via - # python-dateutil - # querystring-parser + # via python-dateutil smmap==5.0.1 # via gitdb -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny threadpoolctl==3.5.0 # via scikit-learn typing-extensions==4.12.2 # via # alembic + # graphene # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # docker # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via flask -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata diff --git a/docker/requirements/linux-amd64-py3.11-pytorch-cpu-requirements.txt b/docker/requirements/linux-amd64-py3.11-pytorch-cpu-requirements.txt index b2e2c0152..e9a963372 100644 --- a/docker/requirements/linux-amd64-py3.11-pytorch-cpu-requirements.txt +++ b/docker/requirements/linux-amd64-py3.11-pytorch-cpu-requirements.txt @@ -8,36 +8,38 @@ absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -alembic==1.13.2 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -async-timeout==4.0.3 + # via flask-restx +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -45,42 +47,44 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -deprecated==1.2.14 - # via opentelemetry-api -distributed==2024.6.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distributed==2024.11.2 # via prefect docker==7.1.0 # via # mlflow # prefect entrypoints==0.4 - # via - # 
dioptra (pyproject.toml) - # mlflow -filelock==3.15.4 + # via dioptra (pyproject.toml) +filelock==3.16.1 # via # torch # triton -flask==3.0.3 +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -89,9 +93,9 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -105,48 +109,52 @@ flask-sqlalchemy==3.1.1 # via # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -idna==3.7 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +idna==3.10 # via requests -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -162,13 +170,13 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -176,20 +184,24 @@ locket==1.0.0 # via # distributed # partd -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts @@ -197,20 +209,21 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools mdurl==0.1.2 # via markdown-it-py -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect @@ -218,28 +231,27 @@ multimethod==1.12 # via dioptra (pyproject.toml) mypy-extensions==1.0.0 # via prefect -networkx==3.3 +networkx==3.4.2 # via # scikit-image # torch -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via # adversarial-robustness-toolbox # contourpy # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -276,7 +288,7 @@ nvidia-cusparse-cu12==12.1.0.106 # torch nvidia-nccl-cu12==2.19.3 # via torch -nvidia-nvjitlink-cu12==12.5.82 +nvidia-nvjitlink-cu12==12.6.85 # via # nvidia-cusolver-cu12 # nvidia-cusparse-cu12 @@ 
-286,18 +298,16 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -packaging==24.1 +packaging==24.2 # via # dask # distributed @@ -305,10 +315,11 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # prefect # scikit-image -pandas==2.2.2 + # tensorboard +pandas==2.2.3 # via # dioptra (pyproject.toml) # mlflow @@ -318,7 +329,7 @@ passlib==1.7.4 # via dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -330,27 +341,31 @@ pillow==10.4.0 # torchvision prefect==1.4.1 # via dioptra (pyproject.toml) -protobuf==4.25.3 +protobuf==5.28.3 # via - # mlflow + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via distributed -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk pygments==2.18.0 # via rich -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra (pyproject.toml) # matplotlib @@ -361,6 +376,7 @@ python-dateutil==2.9.0.post0 # botocore # croniter # dioptra (pyproject.toml) + # graphene # matplotlib # pandas # pendulum @@ -370,25 +386,20 @@ python-json-logger==2.0.7 # via dioptra (pyproject.toml) python-slugify==8.0.4 # via prefect -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dask # dioptra (pyproject.toml) # distributed - # mlflow + # mlflow-skinny # prefect -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -398,49 +409,52 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # docker - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox + # heart-library # imgaug # python-dateutil - # querystring-parser # tensorboard smmap==5.0.1 # via gitdb @@ -473,23 +487,23 @@ smqtk-image-io==0.17.1 # smqtk-detection sortedcontainers==2.4.0 # via distributed -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow 
-structlog==24.2.0 +sqlparse==0.5.2 + # via mlflow-skinny +structlog==24.4.0 # via dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -497,13 +511,13 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum toml==0.10.2 # via prefect -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -517,11 +531,12 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch.in torchvision==0.17.2 # via -r requirements-dev-pytorch.in -tornado==6.4.1 +tornado==6.4.2 # via distributed -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # nrtk triton==2.2.0 # via torch @@ -529,21 +544,23 @@ typing-extensions==4.12.2 # via # alembic # dioptra (pyproject.toml) + # graphene + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed # docker # prefect # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask @@ -551,11 +568,11 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/docker/requirements/linux-amd64-py3.11-pytorch-gpu-requirements.txt b/docker/requirements/linux-amd64-py3.11-pytorch-gpu-requirements.txt index 516be686f..6585d2b5a 100644 --- a/docker/requirements/linux-amd64-py3.11-pytorch-gpu-requirements.txt +++ b/docker/requirements/linux-amd64-py3.11-pytorch-gpu-requirements.txt @@ -8,36 +8,38 @@ absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -alembic==1.13.2 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -async-timeout==4.0.3 + # via flask-restx +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -45,42 +47,44 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -deprecated==1.2.14 - # via opentelemetry-api -distributed==2024.6.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distributed==2024.11.2 # via prefect 
docker==7.1.0 # via # mlflow # prefect entrypoints==0.4 - # via - # dioptra (pyproject.toml) - # mlflow -filelock==3.15.4 + # via dioptra (pyproject.toml) +filelock==3.16.1 # via # torch # triton -flask==3.0.3 +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -89,9 +93,9 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -105,48 +109,52 @@ flask-sqlalchemy==3.1.1 # via # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -idna==3.7 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +idna==3.10 # via requests -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -162,13 +170,13 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -176,20 +184,24 @@ locket==1.0.0 # via # distributed # partd -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts @@ -197,20 +209,21 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools mdurl==0.1.2 # via markdown-it-py -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect @@ -218,28 +231,27 @@ multimethod==1.12 # via dioptra (pyproject.toml) mypy-extensions==1.0.0 # via prefect -networkx==3.3 +networkx==3.4.2 # via # scikit-image # torch -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via # adversarial-robustness-toolbox # contourpy # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -276,7 +288,7 @@ nvidia-cusparse-cu12==12.1.0.106 # torch nvidia-nccl-cu12==2.19.3 # via torch -nvidia-nvjitlink-cu12==12.5.82 
+nvidia-nvjitlink-cu12==12.6.85 # via # nvidia-cusolver-cu12 # nvidia-cusparse-cu12 @@ -286,18 +298,16 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -packaging==24.1 +packaging==24.2 # via # dask # distributed @@ -305,10 +315,11 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # prefect # scikit-image -pandas==2.2.2 + # tensorboard +pandas==2.2.3 # via # dioptra (pyproject.toml) # mlflow @@ -318,7 +329,7 @@ passlib==1.7.4 # via dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -330,27 +341,31 @@ pillow==10.4.0 # torchvision prefect==1.4.1 # via dioptra (pyproject.toml) -protobuf==4.25.3 +protobuf==5.28.3 # via - # mlflow + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via distributed -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk pygments==2.18.0 # via rich -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra (pyproject.toml) # matplotlib @@ -361,6 +376,7 @@ python-dateutil==2.9.0.post0 # botocore # croniter # dioptra (pyproject.toml) + # graphene # matplotlib # pandas # pendulum @@ -370,25 +386,20 @@ python-json-logger==2.0.7 # via dioptra (pyproject.toml) python-slugify==8.0.4 # via prefect -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dask # dioptra (pyproject.toml) # distributed - # mlflow + # mlflow-skinny # prefect -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -398,49 +409,52 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # docker - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox + # heart-library # imgaug # python-dateutil - # querystring-parser # tensorboard smmap==5.0.1 # via gitdb @@ -473,23 +487,23 @@ smqtk-image-io==0.17.1 # smqtk-detection sortedcontainers==2.4.0 # via distributed -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # 
dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow -structlog==24.2.0 +sqlparse==0.5.2 + # via mlflow-skinny +structlog==24.4.0 # via dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -497,13 +511,13 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum toml==0.10.2 # via prefect -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -517,11 +531,12 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch-gpu.in torchvision==0.17.2 # via -r requirements-dev-pytorch-gpu.in -tornado==6.4.1 +tornado==6.4.2 # via distributed -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # nrtk triton==2.2.0 # via torch @@ -529,21 +544,23 @@ typing-extensions==4.12.2 # via # alembic # dioptra (pyproject.toml) + # graphene + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed # docker # prefect # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask @@ -551,11 +568,11 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/docker/requirements/linux-amd64-py3.11-restapi-requirements.txt b/docker/requirements/linux-amd64-py3.11-restapi-requirements.txt index 3b620afaf..1bac1bda1 100644 --- a/docker/requirements/linux-amd64-py3.11-restapi-requirements.txt +++ b/docker/requirements/linux-amd64-py3.11-restapi-requirements.txt @@ -4,31 +4,33 @@ # # pip-compile --extra=mlflow-skinny --output-file=venvs/linux-amd64-py3.11-restapi-requirements.txt docker/pip-tools/restapi-requirements.in pyproject.toml # -alembic==1.13.2 +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate aniso8601==9.0.1 # via flask-restx -async-timeout==4.0.3 +async-timeout==5.0.1 # via -r docker/pip-tools/restapi-requirements.in -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow-skinny -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -36,15 +38,17 @@ click==8.1.7 # flask # mlflow-skinny # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via mlflow-skinny -deprecated==1.2.14 - # via opentelemetry-api -entrypoints==0.4 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 # via - # dioptra (pyproject.toml) - # mlflow-skinny -flask==3.0.3 + # opentelemetry-api + # opentelemetry-semantic-conventions +entrypoints==0.4 + # via dioptra (pyproject.toml) +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -52,9 +56,9 @@ flask==3.0.3 # flask-migrate # flask-restx # flask-sqlalchemy -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 
+flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -72,19 +76,21 @@ gitdb==4.0.11 # via gitpython gitpython==3.1.43 # via mlflow-skinny -greenlet==3.0.3 +google-auth==2.36.0 + # via databricks-sdk +greenlet==3.1.1 # via sqlalchemy -gunicorn==22.0.0 +gunicorn==23.0.0 # via -r docker/pip-tools/restapi-requirements.in -idna==3.7 +idna==3.10 # via requests -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via flask-restx -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -94,57 +100,63 @@ jmespath==1.0.1 # via # boto3 # botocore -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -mako==1.3.5 +mako==1.3.6 # via alembic -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts -mlflow-skinny==2.14.2 +mlflow-skinny==2.18.0 # via dioptra (pyproject.toml) multimethod==1.12 # via dioptra (pyproject.toml) -numpy==2.0.0 +numpy==2.1.3 # via # dioptra (pyproject.toml) # pandas # scipy -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 +opentelemetry-sdk==1.28.2 # via mlflow-skinny -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -packaging==24.1 +packaging==24.2 # via # gunicorn # marshmallow # mlflow-skinny -pandas==2.2.2 +pandas==2.2.3 # via dioptra (pyproject.toml) passlib==1.7.4 # via dioptra (pyproject.toml) -pillow==10.4.0 +pillow==11.0.0 # via -r docker/pip-tools/restapi-requirements.in -protobuf==4.25.3 +protobuf==5.28.3 # via mlflow-skinny -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via -r docker/pip-tools/restapi-requirements.in -pyparsing==3.1.2 +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth +pyparsing==3.2.0 # via dioptra (pyproject.toml) python-dateutil==2.9.0.post0 # via @@ -153,16 +165,15 @@ python-dateutil==2.9.0.post0 # pandas python-json-logger==2.0.7 # via -r docker/pip-tools/restapi-requirements.in -pytz==2024.1 +pytz==2024.2 # via # flask-restx - # mlflow-skinny # pandas -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dioptra (pyproject.toml) # mlflow-skinny -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -172,32 +183,35 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # mlflow-skinny -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scipy==1.14.0 +scipy==1.14.1 # via dioptra (pyproject.toml) -simplejson==3.19.2 +simplejson==3.19.3 # via -r docker/pip-tools/restapi-requirements.in six==1.16.0 # via python-dateutil smmap==5.0.1 # via gitdb -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra (pyproject.toml) # flask-sqlalchemy -sqlparse==0.5.0 +sqlparse==0.5.2 # via mlflow-skinny -structlog==24.2.0 +structlog==24.4.0 # via dioptra (pyproject.toml) typing-extensions==4.12.2 # via @@ -205,20 +219,20 @@ typing-extensions==4.12.2 # dioptra (pyproject.toml) # opentelemetry-sdk # sqlalchemy 
-tzdata==2024.1 +tzdata==2024.2 # via pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask # flask-accepts # flask-login # flask-restx -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata diff --git a/docker/requirements/linux-amd64-py3.11-tensorflow2-cpu-requirements.txt b/docker/requirements/linux-amd64-py3.11-tensorflow2-cpu-requirements.txt index 7caef6c92..3a286ed7b 100644 --- a/docker/requirements/linux-amd64-py3.11-tensorflow2-cpu-requirements.txt +++ b/docker/requirements/linux-amd64-py3.11-tensorflow2-cpu-requirements.txt @@ -9,38 +9,40 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -alembic==1.13.2 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene + # via flask-restx astunparse==1.6.3 # via tensorflow -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -48,38 +50,40 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -deprecated==1.2.14 - # via opentelemetry-api -distributed==2024.6.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distributed==2024.11.2 # via prefect docker==7.1.0 # via # mlflow # prefect entrypoints==0.4 - # via - # dioptra (pyproject.toml) - # mlflow -flask==3.0.3 + # via dioptra (pyproject.toml) +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -88,9 +92,9 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -106,56 +110,60 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib -fsspec==2024.6.1 +fsspec==2024.10.0 # via dask gast==0.6.0 # via tensorflow gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via # tensorboard # tensorflow -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -h5py==3.11.0 +h5py==3.12.1 
# via # keras # tensorflow -idna==3.7 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +idna==3.10 # via requests -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -170,15 +178,15 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -keras==3.4.1 +keras==3.6.0 # via tensorflow -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -188,20 +196,24 @@ locket==1.0.0 # via # distributed # partd -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts @@ -209,12 +221,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools mdurl==0.1.2 # via markdown-it-py @@ -222,9 +233,11 @@ ml-dtypes==0.3.2 # via # keras # tensorflow -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect @@ -234,9 +247,9 @@ mypy-extensions==1.0.0 # via prefect namex==0.0.8 # via keras -networkx==3.3 +networkx==3.4.2 # via scikit-image -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -244,20 +257,18 @@ numpy==1.26.4 # contourpy # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -273,22 +284,20 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow -optree==0.12.0 +optree==0.13.1 # via keras -packaging==24.1 +packaging==24.2 # via # dask # distributed @@ -297,11 +306,11 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # prefect # scikit-image # tensorflow -pandas==2.2.2 +pandas==2.2.3 # via # dioptra (pyproject.toml) # mlflow @@ -311,7 +320,7 @@ passlib==1.7.4 # via dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -322,28 +331,32 @@ pillow==10.4.0 # smqtk-image-io prefect==1.4.1 # via dioptra (pyproject.toml) -protobuf==4.25.3 +protobuf==4.25.5 # via - # mlflow + # mlflow-skinny # tensorboard # tensorflow 
-psutil==6.0.0 +psutil==6.1.0 # via distributed -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk pygments==2.18.0 # via rich -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra (pyproject.toml) # matplotlib @@ -354,6 +367,7 @@ python-dateutil==2.9.0.post0 # botocore # croniter # dioptra (pyproject.toml) + # graphene # matplotlib # pandas # pendulum @@ -363,25 +377,20 @@ python-json-logger==2.0.7 # via dioptra (pyproject.toml) python-slugify==8.0.4 # via prefect -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dask # dioptra (pyproject.toml) # distributed - # mlflow + # mlflow-skinny # prefect -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -391,54 +400,57 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # docker - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # tensorflow -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # astunparse # google-pasta + # heart-library # imgaug # python-dateutil - # querystring-parser # tensorboard # tensorflow smmap==5.0.1 @@ -472,15 +484,15 @@ smqtk-image-io==0.17.1 # smqtk-detection sortedcontainers==2.4.0 # via distributed -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow -structlog==24.2.0 +sqlparse==0.5.2 + # via mlflow-skinny +structlog==24.4.0 # via dioptra (pyproject.toml) tabulate==0.9.0 # via prefect @@ -496,49 +508,52 @@ tensorflow==2.16.1 # via -r requirements-dev-tensorflow.in tensorflow-io-gcs-filesystem==0.37.1 # via tensorflow -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum toml==0.10.2 # via prefect -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via distributed -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # nrtk typing-extensions==4.12.2 # via # alembic # dioptra (pyproject.toml) + # graphene + # maite # opentelemetry-sdk # optree # sqlalchemy # tensorflow -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed # docker # 
prefect # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask @@ -546,15 +561,15 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via astunparse -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/docker/requirements/linux-amd64-py3.11-tensorflow2-gpu-requirements.txt b/docker/requirements/linux-amd64-py3.11-tensorflow2-gpu-requirements.txt index d48408eff..74d2104a5 100644 --- a/docker/requirements/linux-amd64-py3.11-tensorflow2-gpu-requirements.txt +++ b/docker/requirements/linux-amd64-py3.11-tensorflow2-gpu-requirements.txt @@ -9,38 +9,40 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -alembic==1.13.2 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene + # via flask-restx astunparse==1.6.3 # via tensorflow -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -48,38 +50,40 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -deprecated==1.2.14 - # via opentelemetry-api -distributed==2024.6.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distributed==2024.11.2 # via prefect docker==7.1.0 # via # mlflow # prefect entrypoints==0.4 - # via - # dioptra (pyproject.toml) - # mlflow -flask==3.0.3 + # via dioptra (pyproject.toml) +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -88,9 +92,9 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -106,56 +110,60 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib -fsspec==2024.6.1 +fsspec==2024.10.0 # via dask gast==0.6.0 # via tensorflow gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 
+grpcio==1.68.0 # via # tensorboard # tensorflow -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -h5py==3.11.0 +h5py==3.12.1 # via # keras # tensorflow -idna==3.7 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +idna==3.10 # via requests -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -170,15 +178,15 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -keras==3.4.1 +keras==3.6.0 # via tensorflow -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -188,20 +196,24 @@ locket==1.0.0 # via # distributed # partd -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts @@ -209,12 +221,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools mdurl==0.1.2 # via markdown-it-py @@ -222,9 +233,11 @@ ml-dtypes==0.3.2 # via # keras # tensorflow -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect @@ -234,9 +247,9 @@ mypy-extensions==1.0.0 # via prefect namex==0.0.8 # via keras -networkx==3.3 +networkx==3.4.2 # via scikit-image -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -244,20 +257,18 @@ numpy==1.26.4 # contourpy # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -307,22 +318,20 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow -optree==0.12.0 +optree==0.13.1 # via keras -packaging==24.1 +packaging==24.2 # via # dask # distributed @@ -331,11 +340,11 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # prefect # scikit-image # tensorflow -pandas==2.2.2 +pandas==2.2.3 # via # dioptra (pyproject.toml) # mlflow @@ -345,7 +354,7 @@ passlib==1.7.4 # via dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -356,28 +365,32 @@ pillow==10.4.0 # smqtk-image-io prefect==1.4.1 # via 
dioptra (pyproject.toml) -protobuf==4.25.3 +protobuf==4.25.5 # via - # mlflow + # mlflow-skinny # tensorboard # tensorflow -psutil==6.0.0 +psutil==6.1.0 # via distributed -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk pygments==2.18.0 # via rich -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra (pyproject.toml) # matplotlib @@ -388,6 +401,7 @@ python-dateutil==2.9.0.post0 # botocore # croniter # dioptra (pyproject.toml) + # graphene # matplotlib # pandas # pendulum @@ -397,25 +411,20 @@ python-json-logger==2.0.7 # via dioptra (pyproject.toml) python-slugify==8.0.4 # via prefect -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dask # dioptra (pyproject.toml) # distributed - # mlflow + # mlflow-skinny # prefect -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -425,54 +434,57 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # docker - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # tensorflow -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # astunparse # google-pasta + # heart-library # imgaug # python-dateutil - # querystring-parser # tensorboard # tensorflow smmap==5.0.1 @@ -506,15 +518,15 @@ smqtk-image-io==0.17.1 # smqtk-detection sortedcontainers==2.4.0 # via distributed -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow -structlog==24.2.0 +sqlparse==0.5.2 + # via mlflow-skinny +structlog==24.4.0 # via dioptra (pyproject.toml) tabulate==0.9.0 # via prefect @@ -530,49 +542,52 @@ tensorflow[and-cuda]==2.16.1 ; sys_platform == "linux" and (platform_machine == # via -r requirements-dev-tensorflow-gpu.in tensorflow-io-gcs-filesystem==0.37.1 # via tensorflow -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum toml==0.10.2 # via prefect -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via distributed -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # nrtk typing-extensions==4.12.2 # via # alembic # dioptra (pyproject.toml) + # graphene + # maite # 
opentelemetry-sdk # optree # sqlalchemy # tensorflow -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed # docker # prefect # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask @@ -580,15 +595,15 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via astunparse -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/docker/requirements/linux-arm64-py3.11-mlflow-tracking-requirements.txt b/docker/requirements/linux-arm64-py3.11-mlflow-tracking-requirements.txt index b5b10f3fc..6e63861ec 100644 --- a/docker/requirements/linux-arm64-py3.11-mlflow-tracking-requirements.txt +++ b/docker/requirements/linux-arm64-py3.11-mlflow-tracking-requirements.txt @@ -4,65 +4,69 @@ # # pip-compile --output-file=venvs/linux-arm64-py3.11-mlflow-tracking-requirements.txt docker/pip-tools/mlflow-tracking-requirements.in # -alembic==1.13.2 +alembic==1.14.0 # via mlflow -aniso8601==9.0.1 - # via graphene -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via -r docker/pip-tools/mlflow-tracking-requirements.in -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via # flask - # mlflow -cloudpickle==3.0.0 - # via mlflow -contourpy==1.2.1 + # mlflow-skinny +cloudpickle==3.1.0 + # via mlflow-skinny +contourpy==1.3.1 # via matplotlib cycler==0.12.1 # via matplotlib -deprecated==1.2.14 - # via opentelemetry-api +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions docker==7.1.0 # via mlflow -entrypoints==0.4 - # via mlflow -flask==3.0.3 +flask==3.1.0 # via mlflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -idna==3.7 +idna==3.10 # via requests -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via - # mlflow + # mlflow-skinny # opentelemetry-api itsdangerous==2.2.0 # via flask @@ -76,112 +80,118 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib -mako==1.3.5 +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via mlflow -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -matplotlib==3.9.1 +matplotlib==3.9.2 # via mlflow -mlflow==2.14.2 +mlflow==2.18.0 # via -r docker/pip-tools/mlflow-tracking-requirements.in -numpy==1.26.4 +mlflow-skinny==2.18.0 + # via mlflow +numpy==2.1.3 # via # contourpy # matplotlib # mlflow # pandas - # pyarrow # scikit-learn # scipy -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow 
-opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -packaging==24.1 +packaging==24.2 # via # gunicorn # matplotlib - # mlflow -pandas==2.2.2 + # mlflow-skinny +pandas==2.2.3 # via mlflow -pillow==10.4.0 +pillow==11.0.0 # via matplotlib -protobuf==4.25.3 - # via mlflow -psycopg2-binary==2.9.9 +protobuf==5.28.3 + # via mlflow-skinny +psycopg2-binary==2.9.10 # via -r docker/pip-tools/mlflow-tracking-requirements.in -pyarrow==15.0.2 +pyarrow==18.1.0 # via mlflow -pyparsing==3.1.2 +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth +pyparsing==3.2.0 # via matplotlib python-dateutil==2.9.0.post0 # via # botocore + # graphene # matplotlib # pandas -pytz==2024.1 - # via - # mlflow - # pandas -pyyaml==6.0.1 - # via mlflow -querystring-parser==1.2.4 - # via mlflow +pytz==2024.2 + # via pandas +pyyaml==6.0.2 + # via mlflow-skinny requests==2.32.3 # via + # databricks-sdk # docker - # mlflow -s3transfer==0.10.2 + # mlflow-skinny +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via mlflow -scipy==1.14.0 +scipy==1.14.1 # via # mlflow # scikit-learn -simplejson==3.19.2 +simplejson==3.19.3 # via -r docker/pip-tools/mlflow-tracking-requirements.in six==1.16.0 - # via - # python-dateutil - # querystring-parser + # via python-dateutil smmap==5.0.1 # via gitdb -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny threadpoolctl==3.5.0 # via scikit-learn typing-extensions==4.12.2 # via # alembic + # graphene # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # docker # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via flask -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata diff --git a/docker/requirements/linux-arm64-py3.11-pytorch-cpu-requirements.txt b/docker/requirements/linux-arm64-py3.11-pytorch-cpu-requirements.txt index 10fec1a4c..b6ce309ad 100644 --- a/docker/requirements/linux-arm64-py3.11-pytorch-cpu-requirements.txt +++ b/docker/requirements/linux-arm64-py3.11-pytorch-cpu-requirements.txt @@ -8,36 +8,38 @@ absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -alembic==1.13.2 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -async-timeout==4.0.3 + # via flask-restx +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -45,40 +47,42 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib 
-croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -deprecated==1.2.14 - # via opentelemetry-api -distributed==2024.6.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distributed==2024.11.2 # via prefect docker==7.1.0 # via # mlflow # prefect entrypoints==0.4 - # via - # dioptra (pyproject.toml) - # mlflow -filelock==3.15.4 + # via dioptra (pyproject.toml) +filelock==3.16.1 # via torch -flask==3.0.3 +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -87,9 +91,9 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -103,48 +107,52 @@ flask-sqlalchemy==3.1.1 # via # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -idna==3.7 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +idna==3.10 # via requests -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -160,13 +168,13 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -174,20 +182,24 @@ locket==1.0.0 # via # distributed # partd -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts @@ -195,20 +207,21 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools mdurl==0.1.2 # via markdown-it-py -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect @@ -216,28 +229,27 @@ multimethod==1.12 # via dioptra (pyproject.toml) mypy-extensions==1.0.0 # via prefect -networkx==3.3 +networkx==3.4.2 # via # scikit-image # torch -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via # adversarial-robustness-toolbox # 
contourpy # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -253,18 +265,16 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -packaging==24.1 +packaging==24.2 # via # dask # distributed @@ -272,10 +282,11 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # prefect # scikit-image -pandas==2.2.2 + # tensorboard +pandas==2.2.3 # via # dioptra (pyproject.toml) # mlflow @@ -285,7 +296,7 @@ passlib==1.7.4 # via dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -297,27 +308,31 @@ pillow==10.4.0 # torchvision prefect==1.4.1 # via dioptra (pyproject.toml) -protobuf==4.25.3 +protobuf==5.28.3 # via - # mlflow + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via distributed -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk pygments==2.18.0 # via rich -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra (pyproject.toml) # matplotlib @@ -328,6 +343,7 @@ python-dateutil==2.9.0.post0 # botocore # croniter # dioptra (pyproject.toml) + # graphene # matplotlib # pandas # pendulum @@ -337,25 +353,20 @@ python-json-logger==2.0.7 # via dioptra (pyproject.toml) python-slugify==8.0.4 # via prefect -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dask # dioptra (pyproject.toml) # distributed - # mlflow + # mlflow-skinny # prefect -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -365,49 +376,52 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # docker - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox + # heart-library # imgaug # python-dateutil - # querystring-parser # tensorboard smmap==5.0.1 # via gitdb @@ -440,23 +454,23 @@ 
smqtk-image-io==0.17.1 # smqtk-detection sortedcontainers==2.4.0 # via distributed -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow -structlog==24.2.0 +sqlparse==0.5.2 + # via mlflow-skinny +structlog==24.4.0 # via dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -464,13 +478,13 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum toml==0.10.2 # via prefect -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -484,31 +498,34 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch.in torchvision==0.17.2 # via -r requirements-dev-pytorch.in -tornado==6.4.1 +tornado==6.4.2 # via distributed -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # nrtk typing-extensions==4.12.2 # via # alembic # dioptra (pyproject.toml) + # graphene + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed # docker # prefect # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask @@ -516,11 +533,11 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/docker/requirements/linux-arm64-py3.11-restapi-requirements.txt b/docker/requirements/linux-arm64-py3.11-restapi-requirements.txt index f057c23e4..47a5cf037 100644 --- a/docker/requirements/linux-arm64-py3.11-restapi-requirements.txt +++ b/docker/requirements/linux-arm64-py3.11-restapi-requirements.txt @@ -4,31 +4,33 @@ # # pip-compile --extra=mlflow-skinny --output-file=venvs/linux-arm64-py3.11-restapi-requirements.txt docker/pip-tools/restapi-requirements.in pyproject.toml # -alembic==1.13.2 +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate aniso8601==9.0.1 # via flask-restx -async-timeout==4.0.3 +async-timeout==5.0.1 # via -r docker/pip-tools/restapi-requirements.in -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow-skinny -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -36,15 +38,17 @@ click==8.1.7 # flask # mlflow-skinny # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via mlflow-skinny -deprecated==1.2.14 - # via opentelemetry-api -entrypoints==0.4 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 # via - # dioptra (pyproject.toml) - # mlflow-skinny -flask==3.0.3 + # opentelemetry-api + # opentelemetry-semantic-conventions +entrypoints==0.4 + # via dioptra (pyproject.toml) +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -52,9 +56,9 @@ flask==3.0.3 # flask-migrate # flask-restx # flask-sqlalchemy 
-flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -72,19 +76,21 @@ gitdb==4.0.11 # via gitpython gitpython==3.1.43 # via mlflow-skinny -greenlet==3.0.3 +google-auth==2.36.0 + # via databricks-sdk +greenlet==3.1.1 # via sqlalchemy -gunicorn==22.0.0 +gunicorn==23.0.0 # via -r docker/pip-tools/restapi-requirements.in -idna==3.7 +idna==3.10 # via requests -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via flask-restx -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -94,57 +100,63 @@ jmespath==1.0.1 # via # boto3 # botocore -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -mako==1.3.5 +mako==1.3.6 # via alembic -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts -mlflow-skinny==2.14.2 +mlflow-skinny==2.18.0 # via dioptra (pyproject.toml) multimethod==1.12 # via dioptra (pyproject.toml) -numpy==2.0.0 +numpy==2.1.3 # via # dioptra (pyproject.toml) # pandas # scipy -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 +opentelemetry-sdk==1.28.2 # via mlflow-skinny -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -packaging==24.1 +packaging==24.2 # via # gunicorn # marshmallow # mlflow-skinny -pandas==2.2.2 +pandas==2.2.3 # via dioptra (pyproject.toml) passlib==1.7.4 # via dioptra (pyproject.toml) -pillow==10.4.0 +pillow==11.0.0 # via -r docker/pip-tools/restapi-requirements.in -protobuf==4.25.3 +protobuf==5.28.3 # via mlflow-skinny -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via -r docker/pip-tools/restapi-requirements.in -pyparsing==3.1.2 +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth +pyparsing==3.2.0 # via dioptra (pyproject.toml) python-dateutil==2.9.0.post0 # via @@ -153,16 +165,15 @@ python-dateutil==2.9.0.post0 # pandas python-json-logger==2.0.7 # via -r docker/pip-tools/restapi-requirements.in -pytz==2024.1 +pytz==2024.2 # via # flask-restx - # mlflow-skinny # pandas -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dioptra (pyproject.toml) # mlflow-skinny -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -172,32 +183,35 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # mlflow-skinny -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scipy==1.14.0 +scipy==1.14.1 # via dioptra (pyproject.toml) -simplejson==3.19.2 +simplejson==3.19.3 # via -r docker/pip-tools/restapi-requirements.in six==1.16.0 # via python-dateutil smmap==5.0.1 # via gitdb -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra (pyproject.toml) # flask-sqlalchemy -sqlparse==0.5.0 +sqlparse==0.5.2 # via mlflow-skinny -structlog==24.2.0 +structlog==24.4.0 # via dioptra (pyproject.toml) typing-extensions==4.12.2 # via @@ -205,20 
+219,20 @@ typing-extensions==4.12.2 # dioptra (pyproject.toml) # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via pandas -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask # flask-accepts # flask-login # flask-restx -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata diff --git a/docker/requirements/linux-arm64-py3.11-tensorflow2-cpu-requirements.txt b/docker/requirements/linux-arm64-py3.11-tensorflow2-cpu-requirements.txt index 132ab9896..f073a23a1 100644 --- a/docker/requirements/linux-arm64-py3.11-tensorflow2-cpu-requirements.txt +++ b/docker/requirements/linux-arm64-py3.11-tensorflow2-cpu-requirements.txt @@ -9,38 +9,40 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -alembic==1.13.2 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +alembic==1.14.0 # via # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene + # via flask-restx astunparse==1.6.3 # via tensorflow -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # jsonschema # referencing -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -cachetools==5.3.3 - # via mlflow -certifi==2024.7.4 +cachetools==5.5.0 + # via + # google-auth + # mlflow-skinny +certifi==2024.8.30 # via requests -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -48,38 +50,40 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -deprecated==1.2.14 - # via opentelemetry-api -distributed==2024.6.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distributed==2024.11.2 # via prefect docker==7.1.0 # via # mlflow # prefect entrypoints==0.4 - # via - # dioptra (pyproject.toml) - # mlflow -flask==3.0.3 + # via dioptra (pyproject.toml) +flask==3.1.0 # via # dioptra (pyproject.toml) # flask-cors @@ -88,9 +92,9 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via dioptra (pyproject.toml) flask-login==0.6.3 # via dioptra (pyproject.toml) @@ -106,56 +110,60 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib -fsspec==2024.6.1 +fsspec==2024.10.0 # via dask gast==0.6.0 # via tensorflow gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via # 
tensorboard # tensorflow -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow -h5py==3.11.0 +h5py==3.12.1 # via # keras # tensorflow -idna==3.7 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +idna==3.10 # via requests -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect -injector==0.21.0 +injector==0.22.0 # via dioptra (pyproject.toml) itsdangerous==2.2.0 # via flask @@ -170,15 +178,15 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -jsonschema==4.22.0 +jsonschema==4.23.0 # via # dioptra (pyproject.toml) # flask-restx -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -keras==3.4.1 +keras==3.6.0 # via tensorflow -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -188,20 +196,24 @@ locket==1.0.0 # via # distributed # partd -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra (pyproject.toml) # flask-accepts @@ -209,12 +221,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools mdurl==0.1.2 # via markdown-it-py @@ -222,9 +233,11 @@ ml-dtypes==0.3.2 # via # keras # tensorflow -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect @@ -234,9 +247,9 @@ mypy-extensions==1.0.0 # via prefect namex==0.0.8 # via keras -networkx==3.3 +networkx==3.4.2 # via scikit-image -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -244,20 +257,18 @@ numpy==1.26.4 # contourpy # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -273,22 +284,20 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow -optree==0.12.0 +optree==0.13.1 # via keras -packaging==24.1 +packaging==24.2 # via # dask # distributed @@ -297,11 +306,11 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # prefect # scikit-image # tensorflow -pandas==2.2.2 +pandas==2.2.3 # via # dioptra (pyproject.toml) # mlflow @@ -311,7 +320,7 @@ passlib==1.7.4 # via dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -322,28 +331,32 @@ pillow==10.4.0 # smqtk-image-io prefect==1.4.1 # via dioptra (pyproject.toml) 
-protobuf==4.25.3 +protobuf==4.25.5 # via - # mlflow + # mlflow-skinny # tensorboard # tensorflow -psutil==6.0.0 +psutil==6.1.0 # via distributed -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk pygments==2.18.0 # via rich -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra (pyproject.toml) # matplotlib @@ -354,6 +367,7 @@ python-dateutil==2.9.0.post0 # botocore # croniter # dioptra (pyproject.toml) + # graphene # matplotlib # pandas # pendulum @@ -363,25 +377,20 @@ python-json-logger==2.0.7 # via dioptra (pyproject.toml) python-slugify==8.0.4 # via prefect -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # dask # dioptra (pyproject.toml) # distributed - # mlflow + # mlflow-skinny # prefect -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra (pyproject.toml) # rq @@ -391,54 +400,57 @@ referencing==0.35.1 # jsonschema-specifications requests==2.32.3 # via + # databricks-sdk # dioptra (pyproject.toml) # docker - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # tensorflow -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # astunparse # google-pasta + # heart-library # imgaug # python-dateutil - # querystring-parser # tensorboard # tensorflow smmap==5.0.1 @@ -472,15 +484,15 @@ smqtk-image-io==0.17.1 # smqtk-detection sortedcontainers==2.4.0 # via distributed -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow -structlog==24.2.0 +sqlparse==0.5.2 + # via mlflow-skinny +structlog==24.4.0 # via dioptra (pyproject.toml) tabulate==0.9.0 # via prefect @@ -496,49 +508,52 @@ tensorflow==2.16.1 # via -r requirements-dev-tensorflow.in tensorflow-io-gcs-filesystem==0.37.1 # via tensorflow -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum toml==0.10.2 # via prefect -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via distributed -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # nrtk typing-extensions==4.12.2 # via # alembic # dioptra (pyproject.toml) + # graphene + # maite # opentelemetry-sdk # optree # sqlalchemy # tensorflow -tzdata==2024.1 +tzdata==2024.2 # via # 
pandas # pendulum -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed # docker # prefect # requests -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra (pyproject.toml) # flask @@ -546,15 +561,15 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via astunparse -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/docker/shellscripts/healthcheck-worker.m4 b/docker/shellscripts/healthcheck-worker.m4 index b0f695902..b6c96d4ea 100644 --- a/docker/shellscripts/healthcheck-worker.m4 +++ b/docker/shellscripts/healthcheck-worker.m4 @@ -16,8 +16,7 @@ set -euo pipefail ${DEBUG:+-x} # Global parameters ########################################################################################### -SHORT_CMD="python" -LONG_CMD="python -m dioptra.rq.cli.rq worker" +CMD="python /opt/venv/bin/dioptra-worker-v1" LOGNAME="Worker Healthcheck" ########################################################################################### @@ -63,19 +62,19 @@ log_info() { ########################################################################################### __get_num_procs() { - ps -C "${SHORT_CMD}" --no-headers -o cmd | grep -E "^${LONG_CMD}" | wc -l + ps -ax | grep "${CMD}" | grep -v grep | wc -l } healthcheck_process() { local num_procs if ! num_procs="$(__get_num_procs)"; then - log_error "Polling of ${_arg_cmd} with ps failed." + log_error "Polling of ${CMD} with ps failed." exit 1 fi if ((num_procs != 1)); then - log_error "Process count for ${_arg_cmd} is ${num_procs} instead of 1." 1>&2 + log_error "Process count for ${CMD} is ${num_procs} instead of 1."
1>&2 exit 1 fi } diff --git a/pyproject.toml b/pyproject.toml index 4313aebc4..9665befa8 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -132,7 +132,9 @@ postgres = [ worker = [ "async_timeout", "adversarial-robustness-toolbox>=1.9.0", + "heart-library>=0.4.2", "imgaug>=0.4.0", + "maite>=0.6.0", "matplotlib", "nrtk>=0.3.0", "opencv-python", diff --git a/requirements/linux-amd64-py3.11-requirements-dev-pytorch.txt b/requirements/linux-amd64-py3.11-requirements-dev-pytorch.txt index 79914dc33..0c7ab8f23 100644 --- a/requirements/linux-amd64-py3.11-requirements-dev-pytorch.txt +++ b/requirements/linux-amd64-py3.11-requirements-dev-pytorch.txt @@ -10,25 +10,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -44,9 +46,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -56,7 +58,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -66,45 +68,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -114,15 +117,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -130,31 +133,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -166,28 
+173,27 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # torch # tox # triton # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -197,11 +203,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -223,17 +229,17 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch @@ -241,35 +247,39 @@ fsspec==2024.6.1 gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -277,18 +287,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -298,20 +308,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -334,11 +343,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -346,17 +355,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -369,12 +377,11 @@ 
jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -383,21 +390,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -407,21 +415,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -430,12 +442,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -447,15 +458,17 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -479,17 +492,17 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via # scikit-image # torch -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -497,17 +510,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -544,7 +556,7 @@ nvidia-cusparse-cu12==12.1.0.106 # torch nvidia-nccl-cu12==2.19.3 # via torch -nvidia-nvjitlink-cu12==12.5.82 +nvidia-nvjitlink-cu12==12.6.85 # via # nvidia-cusolver-cu12 # nvidia-cusparse-cu12 @@ -554,20 +566,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -580,17 +590,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -609,7 +618,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython 
-pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -621,7 +630,7 @@ pillow==10.4.0 # torchvision pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -633,39 +642,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -683,23 +700,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -716,6 +732,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -734,16 +751,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -751,22 +765,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -779,12 +786,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -796,52 +804,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via 
imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -883,46 +892,46 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -934,17 +943,17 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -958,7 +967,7 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch.in torchvision==0.17.2 # via -r requirements-dev-pytorch.in -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -967,11 +976,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -990,29 +1000,31 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole triton==2.2.0 # via torch -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -1020,11 +1032,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1032,7 +1044,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1041,19 +1053,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets 
-wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/linux-amd64-py3.11-requirements-dev-tensorflow.txt b/requirements/linux-amd64-py3.11-requirements-dev-tensorflow.txt index 1102a6095..5e7af143f 100644 --- a/requirements/linux-amd64-py3.11-requirements-dev-tensorflow.txt +++ b/requirements/linux-amd64-py3.11-requirements-dev-tensorflow.txt @@ -11,25 +11,27 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -47,9 +49,9 @@ astunparse==1.6.3 # via tensorflow async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -59,7 +61,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -69,45 +71,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -117,15 +120,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -133,31 +136,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -169,26 +176,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 
+esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -198,11 +204,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -226,17 +232,17 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib @@ -245,43 +251,47 @@ gast==0.6.0 gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via # tensorboard # tensorflow -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -h5py==3.11.0 +h5py==3.12.1 # via # keras # tensorflow -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -289,18 +299,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -310,20 +320,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -345,11 +354,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -357,17 +366,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -380,12 +388,11 @@ 
jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -394,23 +401,24 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -keras==3.4.1 +keras==3.6.0 # via tensorflow -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -422,21 +430,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -445,12 +457,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -466,13 +477,15 @@ ml-dtypes==0.3.2 # via # keras # tensorflow -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -498,15 +511,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -515,20 +528,18 @@ numpy==1.26.4 # dioptra # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -544,24 +555,22 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow -optree==0.12.0 +optree==0.13.1 # via keras overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -575,18 +584,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx # tensorflow # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -605,7 +612,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -616,7 +623,7 @@ 
pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -628,40 +635,48 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==4.25.5 + # via + # mlflow-skinny # tensorboard # tensorflow -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -679,23 +694,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -712,6 +726,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -730,16 +745,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -747,22 +759,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -775,12 +780,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -793,56 +799,57 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra 
(pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens # astunparse - # bleach # google-pasta + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard # tensorflow @@ -885,36 +892,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -932,7 +939,7 @@ tensorflow==2.16.1 # via -r requirements-dev-tensorflow.in tensorflow-io-gcs-filesystem==0.37.1 # via tensorflow -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow terminado==0.18.1 # via @@ -942,22 +949,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -966,11 +973,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -989,28 +997,30 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # optree # sqlalchemy # tensorflow -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -1018,11 +1028,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1030,7 +1040,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1039,22 +1049,22 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # astunparse # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via 
importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/linux-amd64-py3.11-requirements-dev.txt b/requirements/linux-amd64-py3.11-requirements-dev.txt index 885fc6d6b..b1af5616e 100644 --- a/requirements/linux-amd64-py3.11-requirements-dev.txt +++ b/requirements/linux-amd64-py3.11-requirements-dev.txt @@ -8,25 +8,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -42,9 +44,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -54,7 +56,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -64,45 +66,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -112,15 +115,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -128,31 +131,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -164,26 +171,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # 
via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -193,11 +199,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -219,52 +225,56 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -272,18 +282,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -293,20 +303,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -328,11 +337,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -340,17 +349,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -363,12 +371,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -377,21 +384,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via 
jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -401,21 +409,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -424,12 +436,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -441,13 +452,15 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -471,15 +484,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -487,17 +500,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -512,20 +524,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -538,17 +548,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -567,7 +576,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -578,7 +587,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -590,39 +599,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl 
+protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -640,23 +657,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -673,6 +689,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -691,16 +708,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -708,22 +722,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -736,12 +743,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -753,52 +761,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -840,36 +849,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx 
-sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -877,7 +886,7 @@ tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -889,22 +898,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -913,11 +922,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -936,26 +946,28 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -963,11 +975,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -975,7 +987,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -984,19 +996,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/linux-arm64-py3.11-requirements-dev-pytorch.txt b/requirements/linux-arm64-py3.11-requirements-dev-pytorch.txt index 7acf963fe..6ca0cffe2 100644 --- a/requirements/linux-arm64-py3.11-requirements-dev-pytorch.txt +++ b/requirements/linux-arm64-py3.11-requirements-dev-pytorch.txt @@ -10,25 +10,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 
+adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -44,9 +46,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -56,7 +58,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -66,45 +68,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -114,15 +117,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -130,31 +133,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -166,27 +173,26 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # torch # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -196,11 +202,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # 
dioptra (pyproject.toml) @@ -222,17 +228,17 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch @@ -240,35 +246,39 @@ fsspec==2024.6.1 gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -276,18 +286,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -297,20 +307,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -333,11 +342,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -345,17 +354,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -368,12 +376,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -382,21 +389,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -406,21 +414,25 @@ locket==1.0.0 # partd 
lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -429,12 +441,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -446,15 +457,17 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -478,17 +491,17 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via # scikit-image # torch -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -496,17 +509,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -522,20 +534,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -548,17 +558,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -577,7 +586,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -589,7 +598,7 @@ pillow==10.4.0 # torchvision pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -601,39 +610,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via 
google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -651,23 +668,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -684,6 +700,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -702,16 +719,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -719,22 +733,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -747,12 +754,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -764,52 +772,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -851,46 +860,46 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # 
via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -902,17 +911,17 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -926,7 +935,7 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch.in torchvision==0.17.2 # via -r requirements-dev-pytorch.in -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -935,11 +944,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -958,27 +968,29 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -986,11 +998,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -998,7 +1010,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1007,19 +1019,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/linux-arm64-py3.11-requirements-dev-tensorflow.txt b/requirements/linux-arm64-py3.11-requirements-dev-tensorflow.txt index 0f7629c9f..d35eb0bb3 100644 --- a/requirements/linux-arm64-py3.11-requirements-dev-tensorflow.txt +++ b/requirements/linux-arm64-py3.11-requirements-dev-tensorflow.txt @@ -11,25 +11,27 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow 
aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -47,9 +49,9 @@ astunparse==1.6.3 # via tensorflow async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -59,7 +61,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -69,45 +71,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -117,15 +120,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -133,31 +136,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -169,26 +176,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -198,11 +204,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -226,17 +232,17 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 
+fsspec==2024.10.0 # via # dask # universal-pathlib @@ -245,43 +251,47 @@ gast==0.6.0 gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via # tensorboard # tensorflow -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -h5py==3.11.0 +h5py==3.12.1 # via # keras # tensorflow -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -289,18 +299,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -310,20 +320,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -345,11 +354,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -357,17 +366,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -380,12 +388,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -394,23 +401,24 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -keras==3.4.1 +keras==3.6.0 # via tensorflow -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -422,21 +430,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow 
# tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -445,12 +457,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -466,13 +477,15 @@ ml-dtypes==0.3.2 # via # keras # tensorflow -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -498,15 +511,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -515,20 +528,18 @@ numpy==1.26.4 # dioptra # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -544,24 +555,22 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow -optree==0.12.0 +optree==0.13.1 # via keras overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -575,18 +584,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx # tensorflow # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -605,7 +612,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -616,7 +623,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -628,40 +635,48 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==4.25.5 + # via + # mlflow-skinny # tensorboard # tensorflow -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 
+pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -679,23 +694,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -712,6 +726,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -730,16 +745,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -747,22 +759,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -775,12 +780,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -793,56 +799,57 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens # astunparse - # bleach # google-pasta + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard # tensorflow @@ -885,36 +892,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny 
stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -932,7 +939,7 @@ tensorflow==2.16.1 # via -r requirements-dev-tensorflow.in tensorflow-io-gcs-filesystem==0.37.1 # via tensorflow -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow terminado==0.18.1 # via @@ -942,22 +949,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -966,11 +973,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -989,28 +997,30 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # optree # sqlalchemy # tensorflow -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -1018,11 +1028,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1030,7 +1040,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1039,22 +1049,22 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # astunparse # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/linux-arm64-py3.11-requirements-dev.txt b/requirements/linux-arm64-py3.11-requirements-dev.txt index 916a96416..92211fa37 100644 --- a/requirements/linux-arm64-py3.11-requirements-dev.txt +++ b/requirements/linux-arm64-py3.11-requirements-dev.txt @@ -8,25 +8,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -42,9 +44,9 @@ asttokens==2.4.1 # via 
stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -54,7 +56,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -64,45 +66,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -112,15 +115,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -128,31 +131,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -164,26 +171,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -193,11 +199,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -219,52 +225,56 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow 
-graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -272,18 +282,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -293,20 +303,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -328,11 +337,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -340,17 +349,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -363,12 +371,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -377,21 +384,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -401,21 +409,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -424,12 +436,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra 
(pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -441,13 +452,15 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -471,15 +484,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -487,17 +500,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -512,20 +524,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -538,17 +548,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -567,7 +576,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -578,7 +587,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -590,39 +599,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -640,23 +657,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # 
pytest-cookies @@ -673,6 +689,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -691,16 +708,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -708,22 +722,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -736,12 +743,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -753,52 +761,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -840,36 +849,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -877,7 +886,7 @@ tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -889,22 +898,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via 
pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -913,11 +922,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -936,26 +946,28 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -963,11 +975,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -975,7 +987,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -984,19 +996,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/macos-amd64-py3.11-requirements-dev-pytorch.txt b/requirements/macos-amd64-py3.11-requirements-dev-pytorch.txt index 98d3e3d9b..5ef377d77 100644 --- a/requirements/macos-amd64-py3.11-requirements-dev-pytorch.txt +++ b/requirements/macos-amd64-py3.11-requirements-dev-pytorch.txt @@ -10,25 +10,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -46,9 +48,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -58,7 +60,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -68,45 +70,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # 
dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -116,15 +119,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -132,31 +135,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -168,27 +175,26 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # torch # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -198,11 +204,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -224,17 +230,17 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch @@ -242,35 +248,39 @@ fsspec==2024.6.1 gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # 
requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -278,18 +288,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -299,20 +309,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -335,11 +344,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -347,17 +356,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -370,12 +378,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -384,21 +391,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -408,21 +416,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -431,12 +443,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -448,15 +459,17 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -480,17 +493,17 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 
+networkx==3.4.2 # via # scikit-image # torch -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -498,17 +511,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -524,20 +536,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -550,17 +560,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -579,7 +588,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -591,7 +600,7 @@ pillow==10.4.0 # torchvision pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -603,39 +612,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -653,23 +670,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -686,6 +702,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -704,16 +721,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -721,22 +735,15 @@ pyyaml==6.0.1 # dioptra 
(pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -749,12 +756,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -766,52 +774,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -853,46 +862,46 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -904,17 +913,17 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -928,7 +937,7 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch.in torchvision==0.17.2 # via -r requirements-dev-pytorch.in -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -937,11 +946,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado 
-tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -960,27 +970,29 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -988,11 +1000,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1000,7 +1012,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1009,19 +1021,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/macos-amd64-py3.11-requirements-dev-tensorflow.txt b/requirements/macos-amd64-py3.11-requirements-dev-tensorflow.txt index 8b85fe511..2e36d072a 100644 --- a/requirements/macos-amd64-py3.11-requirements-dev-tensorflow.txt +++ b/requirements/macos-amd64-py3.11-requirements-dev-tensorflow.txt @@ -11,25 +11,27 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -49,9 +51,9 @@ astunparse==1.6.3 # via tensorflow async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -61,7 +63,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -71,45 +73,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls 
-certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -119,15 +122,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -135,31 +138,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -171,26 +178,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -200,11 +206,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -228,17 +234,17 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib @@ -247,43 +253,47 @@ gast==0.6.0 gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via # tensorboard # tensorflow -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -h5py==3.11.0 +h5py==3.12.1 # via # keras # tensorflow -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -291,18 +301,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 
# via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -312,20 +322,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -347,11 +356,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -359,17 +368,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -382,12 +390,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -396,23 +403,24 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -keras==3.4.1 +keras==3.6.0 # via tensorflow -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -424,21 +432,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -447,12 +459,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -468,13 +479,15 @@ ml-dtypes==0.3.2 # via # keras # tensorflow -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -500,15 +513,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -517,20 +530,18 @@ 
numpy==1.26.4 # dioptra # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -546,24 +557,22 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow -optree==0.12.0 +optree==0.13.1 # via keras overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -577,18 +586,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx # tensorflow # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -607,7 +614,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -618,7 +625,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -630,40 +637,48 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==4.25.5 + # via + # mlflow-skinny # tensorboard # tensorflow -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -681,23 +696,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -714,6 +728,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -732,16 +747,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -749,22 +761,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 
+pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -777,12 +782,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -795,56 +801,57 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens # astunparse - # bleach # google-pasta + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard # tensorflow @@ -887,36 +894,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -934,7 +941,7 @@ tensorflow==2.16.1 # via -r requirements-dev-tensorflow.in tensorflow-io-gcs-filesystem==0.37.1 # via tensorflow -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow terminado==0.18.1 # via @@ -944,22 +951,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -968,11 +975,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -991,28 +999,30 @@ 
traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # optree # sqlalchemy # tensorflow -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -1020,11 +1030,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1032,7 +1042,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1041,22 +1051,22 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # astunparse # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/macos-amd64-py3.11-requirements-dev.txt b/requirements/macos-amd64-py3.11-requirements-dev.txt index 301aba06e..17429bf20 100644 --- a/requirements/macos-amd64-py3.11-requirements-dev.txt +++ b/requirements/macos-amd64-py3.11-requirements-dev.txt @@ -8,25 +8,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -44,9 +46,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -56,7 +58,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -66,45 +68,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox 
-charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -114,15 +117,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -130,31 +133,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -166,26 +173,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -195,11 +201,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -221,52 +227,56 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -274,18 +284,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -295,20 +305,19 @@ ipykernel==6.29.5 # jupyter # 
jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -330,11 +339,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -342,17 +351,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -365,12 +373,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -379,21 +386,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -403,21 +411,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -426,12 +438,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -443,13 +454,15 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -473,15 +486,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -489,17 +502,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -514,20 +526,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # 
nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -540,17 +550,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -569,7 +578,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -580,7 +589,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -592,39 +601,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -642,23 +659,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -675,6 +691,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -693,16 +710,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -710,22 +724,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -738,12 +745,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # 
prefect # smqtk-dataprovider # sphinx @@ -755,52 +763,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -842,36 +851,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -879,7 +888,7 @@ tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -891,22 +900,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -915,11 +924,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -938,26 +948,28 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # 
distributed @@ -965,11 +977,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -977,7 +989,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -986,19 +998,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/macos-arm64-py3.11-requirements-dev-pytorch.txt b/requirements/macos-arm64-py3.11-requirements-dev-pytorch.txt index b69764ed8..3f75d6519 100644 --- a/requirements/macos-arm64-py3.11-requirements-dev-pytorch.txt +++ b/requirements/macos-arm64-py3.11-requirements-dev-pytorch.txt @@ -10,25 +10,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -46,9 +48,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -58,7 +60,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -68,45 +70,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -116,15 +119,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -132,31 +135,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 
+croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -168,27 +175,26 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # torch # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -198,11 +204,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -224,17 +230,17 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch @@ -242,33 +248,37 @@ fsspec==2024.6.1 gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -276,18 +286,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -297,20 +307,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -333,11 +342,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # 
dioptra # dioptra (pyproject.toml) @@ -345,17 +354,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -368,12 +376,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -382,21 +389,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -406,21 +414,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -429,12 +441,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -446,15 +457,17 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -478,17 +491,17 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via # scikit-image # torch -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -496,17 +509,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -522,20 +534,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -548,17 +558,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # 
mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -577,7 +586,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -589,7 +598,7 @@ pillow==10.4.0 # torchvision pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -601,39 +610,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -651,23 +668,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -684,6 +700,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -702,16 +719,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -719,22 +733,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -747,12 +754,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -764,52 +772,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra 
(pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -851,46 +860,46 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -902,17 +911,17 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -926,7 +935,7 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch.in torchvision==0.17.2 # via -r requirements-dev-pytorch.in -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -935,11 +944,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -958,27 +968,29 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -986,11 +998,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -998,7 +1010,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # 
dioptra (pyproject.toml) @@ -1007,19 +1019,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/macos-arm64-py3.11-requirements-dev-tensorflow.txt b/requirements/macos-arm64-py3.11-requirements-dev-tensorflow.txt index 68a02ee1c..3a84c7e7c 100644 --- a/requirements/macos-arm64-py3.11-requirements-dev-tensorflow.txt +++ b/requirements/macos-arm64-py3.11-requirements-dev-tensorflow.txt @@ -11,25 +11,27 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -49,9 +51,9 @@ astunparse==1.6.3 # via tensorflow async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -61,7 +63,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -71,45 +73,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -119,15 +122,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -135,31 +138,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # 
opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -171,26 +178,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -200,11 +206,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -228,17 +234,17 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib @@ -247,41 +253,45 @@ gast==0.6.0 gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -grpcio==1.64.1 +grpcio==1.68.0 # via # tensorboard # tensorflow -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -h5py==3.11.0 +h5py==3.12.1 # via # keras # tensorflow -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -289,18 +299,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -310,20 +320,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -345,11 +354,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -357,17 +366,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) 
-jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -380,12 +388,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -394,23 +401,24 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -keras==3.4.1 +keras==3.6.0 # via tensorflow -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -422,21 +430,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -445,12 +457,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -466,13 +477,15 @@ ml-dtypes==0.3.2 # via # keras # tensorflow -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -498,15 +511,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -515,20 +528,18 @@ numpy==1.26.4 # dioptra # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -544,24 +555,22 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow -optree==0.12.0 +optree==0.13.1 # via keras overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -575,18 +584,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx # tensorflow # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # 
dioptra (pyproject.toml) @@ -605,7 +612,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -616,7 +623,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -628,40 +635,48 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==4.25.5 + # via + # mlflow-skinny # tensorboard # tensorflow -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -679,23 +694,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -712,6 +726,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -730,16 +745,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -747,22 +759,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -775,12 +780,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -793,56 +799,57 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug 
# mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens # astunparse - # bleach # google-pasta + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard # tensorflow @@ -886,36 +893,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -935,7 +942,7 @@ tensorflow-io-gcs-filesystem==0.37.1 # via tensorflow tensorflow-metal==1.1.0 ; sys_platform == "darwin" and (platform_machine == "aarch64" or platform_machine == "arm64") # via -r requirements-dev-tensorflow.in -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow terminado==0.18.1 # via @@ -945,22 +952,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -969,11 +976,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -992,28 +1000,30 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # optree # sqlalchemy # tensorflow -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -1021,11 +1031,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1033,7 +1043,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1042,23 +1052,23 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # astunparse 
# dioptra (pyproject.toml) # pip-tools # tensorflow-metal -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/macos-arm64-py3.11-requirements-dev.txt b/requirements/macos-arm64-py3.11-requirements-dev.txt index 2dabbc108..8e63ac428 100644 --- a/requirements/macos-arm64-py3.11-requirements-dev.txt +++ b/requirements/macos-arm64-py3.11-requirements-dev.txt @@ -8,25 +8,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -44,9 +46,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -56,7 +58,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -66,45 +68,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -114,15 +117,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via tox @@ -130,31 +133,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via 
@@ -166,26 +173,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -195,11 +201,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -221,50 +227,54 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard -gunicorn==22.0.0 +gunicorn==23.0.0 # via mlflow h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -272,18 +282,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -293,20 +303,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -328,11 +337,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -340,17 +349,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -363,12 +371,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # 
via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -377,21 +384,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -401,21 +409,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -424,12 +436,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -441,13 +452,15 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -471,15 +484,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -487,17 +500,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -512,20 +524,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -538,17 +548,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -567,7 +576,7 @@ pendulum==3.0.0 # via prefect pexpect==4.9.0 # via ipython -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -578,7 +587,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -590,39 +599,47 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) 
-prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) ptyprocess==0.7.0 # via # pexpect # terminado -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -640,23 +657,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -673,6 +689,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -691,16 +708,13 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -708,22 +722,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -736,12 +743,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -753,52 +761,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -840,36 +849,36 @@ snowballstemmer==2.2.0 # sphinx 
sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -877,7 +886,7 @@ tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -889,22 +898,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -913,11 +922,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -936,26 +946,28 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -963,11 +975,11 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -975,7 +987,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -984,19 +996,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/win-amd64-py3.11-requirements-dev-pytorch.txt b/requirements/win-amd64-py3.11-requirements-dev-pytorch.txt index 56c2e6eba..070f25f06 100644 --- a/requirements/win-amd64-py3.11-requirements-dev-pytorch.txt +++ 
b/requirements/win-amd64-py3.11-requirements-dev-pytorch.txt @@ -10,25 +10,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra (pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -44,9 +46,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -56,7 +58,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -66,45 +68,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -114,15 +117,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via @@ -137,31 +140,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -173,27 +180,26 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # torch # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra 
(pyproject.toml) @@ -203,11 +209,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -229,17 +235,17 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # torch @@ -247,33 +253,37 @@ fsspec==2024.6.1 gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -281,18 +291,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -302,20 +312,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -338,11 +347,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -350,17 +359,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -373,12 +381,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -387,21 +394,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets 
-kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -411,21 +419,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -434,12 +446,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -451,15 +462,17 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) +mlflow-skinny==2.18.0 + # via mlflow mpmath==1.3.0 # via sympy -msgpack==1.0.8 +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -483,17 +496,17 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via # scikit-image # torch -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -501,17 +514,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -527,20 +539,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -552,17 +562,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -579,7 +588,7 @@ passlib==1.7.4 # dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -591,7 +600,7 @@ pillow==10.4.0 # torchvision pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -603,35 +612,43 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 
+pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -649,23 +666,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -682,6 +698,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -700,25 +717,22 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pywin32==306 +pywin32==308 # via # docker # jupyter-core -pywinpty==2.0.13 +pywinpty==2.0.14 # via # jupyter-server # jupyter-server-terminals # terminado -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -726,22 +740,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -754,12 +761,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -771,52 +779,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -858,46 +867,46 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx 
sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) -sympy==1.12.1 +sympy==1.13.3 # via torch tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -909,17 +918,17 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed @@ -933,7 +942,7 @@ torchaudio==2.2.2 # via -r requirements-dev-pytorch.in torchvision==0.17.2 # via -r requirements-dev-pytorch.in -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -942,11 +951,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -965,27 +975,29 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy # torch -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -993,13 +1005,13 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox -waitress==3.0.0 +waitress==3.0.2 # via mlflow wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1007,7 +1019,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1016,19 +1028,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/win-amd64-py3.11-requirements-dev-tensorflow.txt b/requirements/win-amd64-py3.11-requirements-dev-tensorflow.txt index 2144ed37f..ccb18ae5e 100644 --- a/requirements/win-amd64-py3.11-requirements-dev-tensorflow.txt +++ b/requirements/win-amd64-py3.11-requirements-dev-tensorflow.txt @@ -11,25 +11,27 @@ absl-py==2.1.0 # keras # tensorboard # tensorflow-intel -adversarial-robustness-toolbox==1.18.1 - # via dioptra 
(pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -47,9 +49,9 @@ astunparse==1.6.3 # via tensorflow-intel async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -59,7 +61,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -69,45 +71,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -117,15 +120,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via @@ -140,31 +143,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -176,26 +183,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -205,11 +211,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 
+flask-cors==5.0.0 # via # dioptra # dioptra (pyproject.toml) @@ -233,17 +239,17 @@ flask-sqlalchemy==3.1.1 # flask-migrate flatbuffers==24.3.25 # via tensorflow-intel -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib @@ -252,41 +258,45 @@ gast==0.6.0 gitdb==4.0.11 # via gitpython gitpython==3.1.43 - # via mlflow + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk google-pasta==0.2.0 # via tensorflow-intel -graphene==3.3 +graphene==3.4.3 # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via # tensorboard # tensorflow-intel h11==0.14.0 # via httpcore -h5py==3.11.0 +h5py==3.12.1 # via # keras # tensorflow-intel -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -294,18 +304,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -315,20 +325,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -350,11 +359,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -362,17 +371,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -385,12 +393,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -399,23 +406,24 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -keras==3.4.1 
+keras==3.6.0 # via tensorflow-intel -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -427,21 +435,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -450,12 +462,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -471,13 +482,15 @@ ml-dtypes==0.3.2 # via # keras # tensorflow-intel -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -503,15 +516,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -520,20 +533,18 @@ numpy==1.26.4 # dioptra # dioptra (pyproject.toml) # h5py + # heart-library # imageio # imgaug # keras + # maite # matplotlib # ml-dtypes # mlflow # nrtk # opencv-python - # opt-einsum # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -549,24 +560,22 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk -opt-einsum==3.3.0 +opt-einsum==3.4.0 # via tensorflow-intel -optree==0.12.0 +optree==0.13.1 # via keras overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -579,18 +588,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx # tensorflow-intel # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -607,7 +614,7 @@ passlib==1.7.4 # dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -618,7 +625,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -630,36 +637,44 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==4.25.5 + # via + # mlflow-skinny # tensorboard # tensorflow-intel -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra 
(pyproject.toml) -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # dioptra (pyproject.toml) @@ -677,23 +692,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -710,6 +724,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -728,25 +743,22 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pywin32==306 +pywin32==308 # via # docker # jupyter-core -pywinpty==2.0.13 +pywinpty==2.0.14 # via # jupyter-server # jupyter-server-terminals # terminado -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -754,22 +766,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -782,12 +787,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -800,56 +806,57 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via # dioptra (pyproject.toml) # keras -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens # astunparse - # bleach # google-pasta + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard # tensorflow-intel @@ -892,36 +899,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 
+sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -941,7 +948,7 @@ tensorflow-intel==2.16.1 # via tensorflow tensorflow-io-gcs-filesystem==0.31.0 # via tensorflow-intel -termcolor==2.4.0 +termcolor==2.5.0 # via tensorflow-intel terminado==0.18.1 # via @@ -951,22 +958,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -975,11 +982,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -998,28 +1006,30 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # optree # sqlalchemy # tensorflow-intel -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -1027,13 +1037,13 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox -waitress==3.0.0 +waitress==3.0.2 # via mlflow wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -1041,7 +1051,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -1050,22 +1060,22 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # astunparse # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via # deprecated # tensorflow-intel -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/requirements/win-amd64-py3.11-requirements-dev.txt b/requirements/win-amd64-py3.11-requirements-dev.txt index 3fa923b23..da187d33f 100644 --- a/requirements/win-amd64-py3.11-requirements-dev.txt +++ b/requirements/win-amd64-py3.11-requirements-dev.txt @@ -8,25 +8,27 @@ # via -r requirements-dev.in absl-py==2.1.0 # via tensorboard -adversarial-robustness-toolbox==1.18.1 - # via dioptra 
(pyproject.toml) -aiohttp==3.9.5 +adversarial-robustness-toolbox==1.18.2 + # via + # dioptra (pyproject.toml) + # heart-library +aiohappyeyeballs==2.4.3 + # via aiohttp +aiohttp==3.11.7 # via dioptra (pyproject.toml) aiosignal==1.3.1 # via aiohttp alabaster==0.7.16 # via sphinx -alembic==1.13.2 +alembic==1.14.0 # via # dioptra # dioptra (pyproject.toml) # flask-migrate # mlflow aniso8601==9.0.1 - # via - # flask-restx - # graphene -anyio==4.4.0 + # via flask-restx +anyio==4.6.2.post1 # via # httpx # jupyter-server @@ -42,9 +44,9 @@ asttokens==2.4.1 # via stack-data async-lru==2.0.4 # via jupyterlab -async-timeout==4.0.3 +async-timeout==5.0.1 # via dioptra (pyproject.toml) -attrs==23.2.0 +attrs==24.2.0 # via # aiohttp # cattrs @@ -54,7 +56,7 @@ attrs==23.2.0 # referencing autopep8==2.3.1 # via dioptra (pyproject.toml) -babel==2.15.0 +babel==2.16.0 # via # jupyterlab-server # sphinx @@ -64,45 +66,46 @@ binaryornot==0.4.4 # via # cookiecutter # dioptra (pyproject.toml) -bleach==6.1.0 +bleach==6.2.0 # via # kaggle # nbconvert -blinker==1.8.2 +blinker==1.9.0 # via flask -boto3==1.34.139 +boto3==1.35.69 # via # dioptra # dioptra (pyproject.toml) -botocore==1.34.139 +botocore==1.35.69 # via # boto3 # s3transfer -build==1.2.1 +build==1.2.2.post1 # via # dioptra (pyproject.toml) # pip-tools -cachetools==5.3.3 +cachetools==5.5.0 # via - # mlflow + # google-auth + # mlflow-skinny # tox -cattrs==23.2.3 +cattrs==24.1.2 # via # lsprotocol # pygls -certifi==2024.7.4 +certifi==2024.8.30 # via # httpcore # httpx # kaggle # requests -cffi==1.16.0 +cffi==1.17.1 # via argon2-cffi-bindings chardet==5.2.0 # via # binaryornot # tox -charset-normalizer==3.3.2 +charset-normalizer==3.4.0 # via requests click==8.1.7 # via @@ -112,15 +115,15 @@ click==8.1.7 # dioptra (pyproject.toml) # distributed # flask - # mlflow + # mlflow-skinny # pip-tools # prefect # rq -cloudpickle==3.0.0 +cloudpickle==3.1.0 # via # dask # distributed - # mlflow + # mlflow-skinny # prefect colorama==0.4.6 # via @@ -135,31 +138,35 @@ comm==0.2.2 # via # ipykernel # ipywidgets -contourpy==1.2.1 +contourpy==1.3.1 # via matplotlib cookiecutter==2.1.1 # via # dioptra (pyproject.toml) # pytest-cookies -croniter==2.0.5 +croniter==5.0.1 # via prefect cycler==0.12.1 # via matplotlib -dask==2024.6.2 +dask==2024.11.2 # via # distributed # prefect -debugpy==1.8.2 +databricks-sdk==0.38.0 + # via mlflow-skinny +debugpy==1.8.9 # via ipykernel decorator==5.1.1 # via ipython defusedxml==0.7.1 # via nbconvert -deprecated==1.2.14 - # via opentelemetry-api -distlib==0.3.8 +deprecated==1.2.15 + # via + # opentelemetry-api + # opentelemetry-semantic-conventions +distlib==0.3.9 # via virtualenv -distributed==2024.6.2 +distributed==2024.11.2 # via prefect docker==7.1.0 # via @@ -171,26 +178,25 @@ entrypoints==0.4 # via # dioptra # dioptra (pyproject.toml) - # mlflow -esbonio==0.16.4 +esbonio==0.16.5 # via dioptra (pyproject.toml) -executing==2.0.1 +executing==2.1.0 # via stack-data -faker==26.0.0 +faker==33.0.0 # via dioptra (pyproject.toml) fastjsonschema==2.20.0 # via nbformat -filelock==3.15.4 +filelock==3.16.1 # via # tox # virtualenv -flake8==7.1.0 +flake8==7.1.1 # via # dioptra (pyproject.toml) # flake8-bugbear -flake8-bugbear==24.4.26 +flake8-bugbear==24.10.31 # via dioptra (pyproject.toml) -flask==3.0.3 +flask==3.1.0 # via # dioptra # dioptra (pyproject.toml) @@ -200,11 +206,11 @@ flask==3.0.3 # flask-restx # flask-sqlalchemy # mlflow -flask-accepts==0.18.4 +flask-accepts==1.0.1 # via # dioptra # dioptra (pyproject.toml) -flask-cors==4.0.1 +flask-cors==5.0.0 
# via # dioptra # dioptra (pyproject.toml) @@ -226,50 +232,54 @@ flask-sqlalchemy==3.1.1 # dioptra # dioptra (pyproject.toml) # flask-migrate -fonttools==4.53.1 +fonttools==4.55.0 # via matplotlib fqdn==1.5.1 # via jsonschema freezegun==1.5.1 # via dioptra (pyproject.toml) -frozenlist==1.4.1 +frozenlist==1.5.0 # via # aiohttp # aiosignal -fsspec==2024.6.1 +fsspec==2024.10.0 # via # dask # universal-pathlib gitdb==4.0.11 # via gitpython gitpython==3.1.43 + # via mlflow-skinny +google-auth==2.36.0 + # via databricks-sdk +graphene==3.4.3 # via mlflow -graphene==3.3 - # via mlflow -graphql-core==3.2.3 +graphql-core==3.2.5 # via # graphene # graphql-relay graphql-relay==3.2.0 # via graphene -greenlet==3.0.3 +greenlet==3.1.1 # via sqlalchemy -grpcio==1.64.1 +grpcio==1.68.0 # via tensorboard h11==0.14.0 # via httpcore -httpcore==1.0.5 +heart-library==0.4.4 + # via dioptra (pyproject.toml) +httpcore==1.0.7 # via httpx -httpx==0.27.0 +httpx==0.27.2 # via jupyterlab -idna==3.7 +idna==3.10 # via # anyio # httpx # jsonschema # requests # yarl -imageio==2.34.2 +imageio==2.36.0 # via # imgaug # scikit-image @@ -277,18 +287,18 @@ imagesize==1.4.1 # via sphinx imgaug==0.4.0 # via dioptra (pyproject.toml) -importlib-metadata==7.1.0 +importlib-metadata==8.5.0 # via # dask - # mlflow + # mlflow-skinny # opentelemetry-api -importlib-resources==6.4.0 +importlib-resources==6.4.5 # via # flask-restx # prefect iniconfig==2.0.0 # via pytest -injector==0.21.0 +injector==0.22.0 # via # dioptra # dioptra (pyproject.toml) @@ -298,20 +308,19 @@ ipykernel==6.29.5 # jupyter # jupyter-console # jupyterlab - # qtconsole -ipython==8.26.0 +ipython==8.29.0 # via # dioptra (pyproject.toml) # ipykernel # ipywidgets # jupyter-console -ipywidgets==8.1.3 +ipywidgets==8.1.5 # via jupyter isoduration==20.11.0 # via jsonschema itsdangerous==2.2.0 # via flask -jedi==0.19.1 +jedi==0.19.2 # via ipython jinja2==3.1.4 # via @@ -333,11 +342,11 @@ jmespath==1.0.1 # botocore joblib==1.4.2 # via scikit-learn -json5==0.9.25 +json5==0.9.28 # via jupyterlab-server jsonpointer==3.0.0 # via jsonschema -jsonschema[format-nongpl]==4.22.0 +jsonschema[format-nongpl]==4.23.0 # via # dioptra # dioptra (pyproject.toml) @@ -345,17 +354,16 @@ jsonschema[format-nongpl]==4.22.0 # jupyter-events # jupyterlab-server # nbformat -jsonschema-specifications==2023.12.1 +jsonschema-specifications==2024.10.1 # via jsonschema -jupyter==1.0.0 +jupyter==1.1.1 # via dioptra (pyproject.toml) -jupyter-client==8.6.2 +jupyter-client==8.6.3 # via # ipykernel # jupyter-console # jupyter-server # nbclient - # qtconsole jupyter-console==6.6.3 # via jupyter jupyter-core==5.7.2 @@ -368,12 +376,11 @@ jupyter-core==5.7.2 # nbclient # nbconvert # nbformat - # qtconsole jupyter-events==0.10.0 # via jupyter-server jupyter-lsp==2.2.5 # via jupyterlab -jupyter-server==2.14.1 +jupyter-server==2.14.2 # via # jupyter-lsp # jupyterlab @@ -382,21 +389,22 @@ jupyter-server==2.14.1 # notebook-shim jupyter-server-terminals==0.5.3 # via jupyter-server -jupyterlab==4.2.3 +jupyterlab==4.2.6 # via # dioptra (pyproject.toml) + # jupyter # notebook jupyterlab-pygments==0.3.0 # via nbconvert -jupyterlab-server==2.27.2 +jupyterlab-server==2.27.3 # via # jupyterlab # notebook -jupyterlab-widgets==3.0.11 +jupyterlab-widgets==3.0.13 # via ipywidgets -kaggle==1.6.14 +kaggle==1.6.17 # via dioptra (pyproject.toml) -kiwisolver==1.4.5 +kiwisolver==1.4.7 # via matplotlib lazy-loader==0.4 # via scikit-image @@ -406,21 +414,25 @@ locket==1.0.0 # partd lsprotocol==2023.0.1 # via pygls -mako==1.3.5 +maite==0.6.1 + # 
via + # dioptra (pyproject.toml) + # heart-library +mako==1.3.6 # via alembic -markdown==3.6 +markdown==3.7 # via # mlflow # tensorboard markdown-it-py==3.0.0 # via rich -markupsafe==2.1.5 +markupsafe==3.0.2 # via # jinja2 # mako # nbconvert # werkzeug -marshmallow==3.21.3 +marshmallow==3.23.1 # via # dioptra # dioptra (pyproject.toml) @@ -429,12 +441,11 @@ marshmallow==3.21.3 # prefect marshmallow-oneofschema==3.1.1 # via prefect -matplotlib==3.9.1 +matplotlib==3.9.2 # via # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # pycocotools matplotlib-inline==0.1.7 # via @@ -446,13 +457,15 @@ mdurl==0.1.2 # via markdown-it-py mistune==3.0.2 # via nbconvert -mlflow==2.14.2 +mlflow==2.18.0 # via dioptra (pyproject.toml) -msgpack==1.0.8 +mlflow-skinny==2.18.0 + # via mlflow +msgpack==1.1.0 # via # distributed # prefect -multidict==6.0.5 +multidict==6.1.0 # via # aiohttp # yarl @@ -476,15 +489,15 @@ nbformat==5.10.4 # nbconvert nest-asyncio==1.6.0 # via ipykernel -networkx==3.3 +networkx==3.4.2 # via scikit-image -notebook==7.2.1 +notebook==7.2.2 # via jupyter notebook-shim==0.2.4 # via # jupyterlab # notebook -nrtk==0.8.1 +nrtk==0.16.0 # via dioptra (pyproject.toml) numpy==1.26.4 # via @@ -492,17 +505,16 @@ numpy==1.26.4 # contourpy # dioptra # dioptra (pyproject.toml) + # heart-library # imageio # imgaug + # maite # matplotlib # mlflow # nrtk # opencv-python # pandas - # pyarrow - # pybsm # pycocotools - # pywavelets # scikit-image # scikit-learn # scipy @@ -517,20 +529,18 @@ opencv-python==4.10.0.84 # via # dioptra (pyproject.toml) # imgaug - # nrtk - # pybsm -opentelemetry-api==1.25.0 +opentelemetry-api==1.28.2 # via - # mlflow + # mlflow-skinny # opentelemetry-sdk # opentelemetry-semantic-conventions -opentelemetry-sdk==1.25.0 - # via mlflow -opentelemetry-semantic-conventions==0.46b0 +opentelemetry-sdk==1.28.2 + # via mlflow-skinny +opentelemetry-semantic-conventions==0.49b2 # via opentelemetry-sdk overrides==7.7.0 # via jupyter-server -packaging==24.1 +packaging==24.2 # via # build # dask @@ -542,17 +552,16 @@ packaging==24.1 # lazy-loader # marshmallow # matplotlib - # mlflow + # mlflow-skinny # nbconvert # prefect # pyproject-api # pytest - # qtconsole - # qtpy # scikit-image # sphinx + # tensorboard # tox -pandas==2.2.2 +pandas==2.2.3 # via # dioptra # dioptra (pyproject.toml) @@ -569,7 +578,7 @@ passlib==1.7.4 # dioptra (pyproject.toml) pendulum==3.0.0 # via prefect -pillow==10.4.0 +pillow==11.0.0 # via # dioptra (pyproject.toml) # imageio @@ -580,7 +589,7 @@ pillow==10.4.0 # smqtk-image-io pip-tools==7.4.1 # via dioptra (pyproject.toml) -platformdirs==4.2.2 +platformdirs==4.3.6 # via # esbonio # jupyter-core @@ -592,35 +601,43 @@ pluggy==1.5.0 # tox prefect==1.4.1 # via dioptra (pyproject.toml) -prometheus-client==0.20.0 +prometheus-client==0.21.0 # via jupyter-server -prompt-toolkit==3.0.47 +prompt-toolkit==3.0.48 # via # ipython # jupyter-console -protobuf==4.25.3 +propcache==0.2.0 # via - # mlflow + # aiohttp + # yarl +protobuf==5.28.3 + # via + # mlflow-skinny # tensorboard -psutil==6.0.0 +psutil==6.1.0 # via # distributed # ipykernel -psycopg2-binary==2.9.9 +psycopg2-binary==2.9.10 # via dioptra (pyproject.toml) -pure-eval==0.2.2 +pure-eval==0.2.3 # via stack-data -pyarrow==15.0.2 +pyarrow==18.1.0 # via # dioptra (pyproject.toml) # mlflow -pybsm==0.5.1 - # via nrtk +pyasn1==0.6.1 + # via + # pyasn1-modules + # rsa +pyasn1-modules==0.4.1 + # via google-auth pycocotools==2.0.8 # via # dioptra (pyproject.toml) # nrtk -pycodestyle==2.12.0 +pycodestyle==2.12.1 # via # autopep8 # 
dioptra (pyproject.toml) @@ -638,23 +655,22 @@ pygments==2.18.0 # ipython # jupyter-console # nbconvert - # qtconsole # rich # sphinx -pyparsing==3.1.2 +pyparsing==3.2.0 # via # dioptra # dioptra (pyproject.toml) # matplotlib -pyproject-api==1.7.1 +pyproject-api==1.8.0 # via tox -pyproject-hooks==1.1.0 +pyproject-hooks==1.2.0 # via # build # pip-tools pyspellchecker==0.8.1 # via esbonio -pytest==8.2.2 +pytest==8.3.3 # via # dioptra (pyproject.toml) # pytest-cookies @@ -671,6 +687,7 @@ python-dateutil==2.9.0.post0 # dioptra (pyproject.toml) # faker # freezegun + # graphene # jupyter-client # kaggle # matplotlib @@ -689,25 +706,22 @@ python-slugify==8.0.4 # prefect pytoml==0.1.21 # via dioptra (pyproject.toml) -pytz==2024.1 +pytz==2024.2 # via # croniter # flask-restx - # mlflow # pandas # prefect -pywavelets==1.6.0 - # via scikit-image -pywin32==306 +pywin32==308 # via # docker # jupyter-core -pywinpty==2.0.13 +pywinpty==2.0.14 # via # jupyter-server # jupyter-server-terminals # terminado -pyyaml==6.0.1 +pyyaml==6.0.2 # via # cookiecutter # dask @@ -715,22 +729,15 @@ pyyaml==6.0.1 # dioptra (pyproject.toml) # distributed # jupyter-events - # mlflow + # mlflow-skinny # prefect -pyzmq==26.0.3 +pyzmq==26.2.0 # via # ipykernel # jupyter-client # jupyter-console # jupyter-server - # qtconsole -qtconsole==5.5.2 - # via jupyter -qtpy==2.4.1 - # via qtconsole -querystring-parser==1.2.4 - # via mlflow -redis==5.0.7 +redis==5.2.0 # via # dioptra # dioptra (pyproject.toml) @@ -743,12 +750,13 @@ referencing==0.35.1 requests==2.32.3 # via # cookiecutter + # databricks-sdk # dioptra # dioptra (pyproject.toml) # docker # jupyterlab-server # kaggle - # mlflow + # mlflow-skinny # prefect # smqtk-dataprovider # sphinx @@ -760,52 +768,53 @@ rfc3986-validator==0.1.1 # via # jsonschema # jupyter-events -rich==13.7.1 +rich==13.9.4 # via dioptra (pyproject.toml) -rpds-py==0.18.1 +rpds-py==0.21.0 # via # jsonschema # referencing -rq==1.16.2 +rq==2.0.0 # via # dioptra # dioptra (pyproject.toml) -s3transfer==0.10.2 +rsa==4.9 + # via google-auth +s3transfer==0.10.4 # via boto3 -scikit-image==0.21.0 +scikit-image==0.24.0 # via # imgaug # nrtk -scikit-learn==1.5.1 +scikit-learn==1.5.2 # via # adversarial-robustness-toolbox # dioptra (pyproject.toml) + # heart-library # mlflow -scipy==1.13.1 +scipy==1.14.1 # via # adversarial-robustness-toolbox # dioptra # dioptra (pyproject.toml) # imgaug # mlflow - # pybsm # scikit-image # scikit-learn send2trash==1.8.3 # via jupyter-server -shapely==2.0.4 +shapely==2.0.6 # via imgaug -simplejson==3.19.2 +simplejson==3.19.3 # via dioptra (pyproject.toml) six==1.16.0 # via # adversarial-robustness-toolbox # asttokens - # bleach + # heart-library # imgaug # kaggle # python-dateutil - # querystring-parser # rfc3339-validator # tensorboard smmap==5.0.1 @@ -847,36 +856,36 @@ snowballstemmer==2.2.0 # sphinx sortedcontainers==2.4.0 # via distributed -soupsieve==2.5 +soupsieve==2.6 # via beautifulsoup4 sphinx==4.5.0 # via # dioptra (pyproject.toml) # esbonio -sphinxcontrib-applehelp==1.0.8 +sphinxcontrib-applehelp==2.0.0 # via sphinx -sphinxcontrib-devhelp==1.0.6 +sphinxcontrib-devhelp==2.0.0 # via sphinx -sphinxcontrib-htmlhelp==2.0.5 +sphinxcontrib-htmlhelp==2.1.0 # via sphinx sphinxcontrib-jsmath==1.0.1 # via sphinx -sphinxcontrib-qthelp==1.0.7 +sphinxcontrib-qthelp==2.0.0 # via sphinx -sphinxcontrib-serializinghtml==1.1.10 +sphinxcontrib-serializinghtml==2.0.0 # via sphinx -sqlalchemy==2.0.31 +sqlalchemy==2.0.36 # via # alembic # dioptra # dioptra (pyproject.toml) # flask-sqlalchemy # 
mlflow -sqlparse==0.5.0 - # via mlflow +sqlparse==0.5.2 + # via mlflow-skinny stack-data==0.6.3 # via ipython -structlog==24.2.0 +structlog==24.4.0 # via # dioptra # dioptra (pyproject.toml) @@ -884,7 +893,7 @@ tabulate==0.9.0 # via prefect tblib==3.0.0 # via distributed -tensorboard==2.17.0 +tensorboard==2.18.0 # via dioptra (pyproject.toml) tensorboard-data-server==0.7.2 # via tensorboard @@ -896,22 +905,22 @@ text-unidecode==1.3 # via python-slugify threadpoolctl==3.5.0 # via scikit-learn -tifffile==2024.7.2 +tifffile==2024.9.20 # via scikit-image -time-machine==2.14.2 +time-machine==2.16.0 # via pendulum -tinycss2==1.3.0 +tinycss2==1.4.0 # via nbconvert toml==0.10.2 # via prefect -tomli==2.0.1 +tomli==2.1.0 # via dioptra (pyproject.toml) -toolz==0.12.1 +toolz==1.0.0 # via # dask # distributed # partd -tornado==6.4.1 +tornado==6.4.2 # via # distributed # ipykernel @@ -920,11 +929,12 @@ tornado==6.4.1 # jupyterlab # notebook # terminado -tox==4.16.0 +tox==4.23.2 # via dioptra (pyproject.toml) -tqdm==4.66.4 +tqdm==4.67.1 # via # adversarial-robustness-toolbox + # heart-library # kaggle # nrtk traitlets==5.14.3 @@ -943,26 +953,28 @@ traitlets==5.14.3 # nbclient # nbconvert # nbformat - # qtconsole -types-python-dateutil==2.9.0.20240316 +types-python-dateutil==2.9.0.20241003 # via arrow typing-extensions==4.12.2 # via # alembic # dioptra # dioptra (pyproject.toml) + # faker + # graphene # ipython + # maite # opentelemetry-sdk # sqlalchemy -tzdata==2024.1 +tzdata==2024.2 # via # pandas # pendulum -universal-pathlib==0.2.2 +universal-pathlib==0.2.5 # via dioptra (pyproject.toml) uri-template==1.3.0 # via jsonschema -urllib3==2.2.2 +urllib3==2.2.3 # via # botocore # distributed @@ -970,13 +982,13 @@ urllib3==2.2.2 # kaggle # prefect # requests -virtualenv==20.26.3 +virtualenv==20.28.0 # via tox -waitress==3.0.0 +waitress==3.0.2 # via mlflow wcwidth==0.2.13 # via prompt-toolkit -webcolors==24.6.0 +webcolors==24.11.1 # via jsonschema webencodings==0.5.1 # via @@ -984,7 +996,7 @@ webencodings==0.5.1 # tinycss2 websocket-client==1.8.0 # via jupyter-server -werkzeug==3.0.3 +werkzeug==3.1.3 # via # dioptra # dioptra (pyproject.toml) @@ -993,19 +1005,19 @@ werkzeug==3.0.3 # flask-login # flask-restx # tensorboard -wheel==0.43.0 +wheel==0.45.1 # via # dioptra (pyproject.toml) # pip-tools -widgetsnbextension==4.0.11 +widgetsnbextension==4.0.13 # via ipywidgets -wrapt==1.16.0 +wrapt==1.17.0 # via deprecated -yarl==1.9.4 +yarl==1.18.0 # via aiohttp zict==3.0.0 # via distributed -zipp==3.19.2 +zipp==3.21.0 # via importlib-metadata # The following packages are considered to be unsafe in a requirements file: diff --git a/src/dioptra/client/__init__.py b/src/dioptra/client/__init__.py index 9c9274cfe..0da6a6511 100644 --- a/src/dioptra/client/__init__.py +++ b/src/dioptra/client/__init__.py @@ -14,8 +14,14 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from __future__ import annotations +from .client import ( + DioptraClient, + connect_json_dioptra_client, + connect_response_dioptra_client, +) -from ._client import DioptraClient - -__all__ = ["DioptraClient"] +__all__ = [ + "connect_response_dioptra_client", + "connect_json_dioptra_client", + "DioptraClient", +] diff --git a/src/dioptra/client/_client.py b/src/dioptra/client/_client.py deleted file mode 100644 index 176ea3ae9..000000000 --- a/src/dioptra/client/_client.py +++ /dev/null @@ -1,746 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute 
of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -from __future__ import annotations - -import os -from pathlib import Path -from posixpath import join as urljoin -from typing import Any, cast -from urllib.parse import urlparse, urlunparse - -import requests - - -class DioptraClient(object): - """Connects to the Dioptra REST api, and provides access to endpoints. - - Args: - address: Address of the Dioptra REST api or if no address is given the - DIOPTRA_RESTAPI_URI environment variable is used. - api_version: The version of the Dioptra REST API to use. Defaults to "v0". - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html for - more information on Dioptra's REST api. - """ - - def __init__(self, address: str | None = None, api_version: str = "v0") -> None: - address = ( - f"{address}/api/{api_version}" - if address - else f"{os.environ['DIOPTRA_RESTAPI_URI']}/api/{api_version}" - ) - self._scheme, self._netloc, self._path, _, _, _ = urlparse(address) - - @property - def experiment_endpoint(self) -> str: - """Experiment endpoint url""" - return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "experiment/"), "", "", "") - ) - - @property - def job_endpoint(self) -> str: - """Job endpoint url""" - return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "job/"), "", "", "") - ) - - @property - def task_plugin_endpoint(self) -> str: - """Task plugins endpoint url""" - return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "taskPlugin/"), "", "", "") - ) - - @property - def task_plugin_builtins_endpoint(self) -> str: - """Builtin task plugins endpoint url""" - return urlunparse( - ( - self._scheme, - self._netloc, - urljoin(self._path, "taskPlugin/dioptra_builtins"), - "", - "", - "", - ) - ) - - @property - def task_plugin_custom_endpoint(self) -> str: - """Custom task plugins endpoint url""" - return urlunparse( - ( - self._scheme, - self._netloc, - urljoin(self._path, "taskPlugin/dioptra_custom"), - "", - "", - "", - ) - ) - - @property - def queue_endpoint(self) -> str: - """Queue endpoint url""" - return urlunparse( - (self._scheme, self._netloc, urljoin(self._path, "queue/"), "", "", "") - ) - - def delete_custom_task_plugin(self, name: str) -> dict[str, Any]: - """Deletes a custom task plugin by its unique name. - - Args: - name: A unique string identifying a task plugin package within - dioptra_custom collection. - - Returns: - The Dioptra REST api's response. 
- - Example:: - - { - 'collection': 'dioptra_custom', - 'status': 'Success', - 'taskPluginName': ['evaluation'] - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - plugin_name_query: str = urljoin(self.task_plugin_custom_endpoint, name) - result = cast(dict[str, Any], requests.delete(plugin_name_query).json()) - return result - - def get_experiment_by_id(self, id: int) -> dict[str, Any]: - """Gets an experiment by its unique identifier. - - Args: - id: An integer identifying a registered experiment. - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'lastModified': '2023-06-22T13:42:35.379462', - 'experimentId': 10, - 'name': 'mnist_feature_squeezing', - 'createdOn': '2023-06-22T13:42:35.379462' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - experiment_id_query: str = urljoin(self.experiment_endpoint, str(id)) - return cast(dict[str, Any], requests.get(experiment_id_query).json()) - - def get_experiment_by_name(self, name: str) -> dict[str, Any]: - """Gets an experiment by its unique name. - - Args: - name: The name of the experiment. - - Returns: - The Dioptra REST api's response containing the experiment id, name, and - metadata. - - Example:: - - { - 'experimentId': 1, - 'name': 'mnist', - 'createdOn': '2023-06-22T13:42:35.379462', - 'lastModified': '2023-06-22T13:42:35.379462' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - experiment_name_query: str = urljoin(self.experiment_endpoint, "name", name) - return cast(dict[str, Any], requests.get(experiment_name_query).json()) - - def get_job_by_id(self, id: str) -> dict[str, Any]: - """Gets a job by its unique identifier. - - Args: - id: A string specifying a job's UUID. - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'mlflowRunId': None, - 'lastModified': '2023-06-26T15:26:43.100093', - 'experimentId': 10, - 'queueId': 2, - 'workflowUri': 's3://workflow/268a7620/workflows.tar.gz', - 'entryPoint': 'train', - 'dependsOn': None, - 'status': 'queued', - 'timeout': '24h', - 'jobId': '4eb2305e-57c3-4867-a59f-1a1ecd2033d4', - 'entryPointKwargs': '-P model_architecture=shallow_net -P epochs=3', - 'createdOn': '2023-06-26T15:26:43.100093' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - job_id_query: str = urljoin(self.job_endpoint, id) - return cast(dict[str, Any], requests.get(job_id_query).json()) - - def get_queue_by_id(self, id: int) -> dict[str, Any]: - """Gets a queue by its unique identifier. - - Args: - id: An integer identifying a registered queue. - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'lastModified': '2023-04-24T20:53:09.801442', - 'name': 'tensorflow_cpu', - 'queueId': 1, - 'createdOn': '2023-04-24T20:53:09.801442' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - queue_id_query: str = urljoin(self.queue_endpoint, str(id)) - return cast(dict[str, Any], requests.get(queue_id_query).json()) - - def get_queue_by_name(self, name: str) -> dict[str, Any]: - """Gets a queue by its unique name. - - Args: - name: The name of the queue. - - Returns: - The Dioptra REST api's response. 
- - Example:: - - { - 'lastModified': '2023-04-24T20:53:09.801442', - 'name': 'tensorflow_cpu', - 'queueId': 1, - 'createdOn': '2023-04-24T20:53:09.801442' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - queue_name_query: str = urljoin(self.queue_endpoint, "name", name) - return cast(dict[str, Any], requests.get(queue_name_query).json()) - - def get_builtin_task_plugin(self, name: str) -> dict[str, Any]: - """Gets a custom builtin plugin by its unique name. - - Args: - name: A unique string identifying a task plugin package within - dioptra_builtins collection. - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'taskPluginName': 'attacks', - 'collection': 'dioptra_builtins', - 'modules': ['__init__.py', 'fgm.py'] - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - task_plugin_name_query: str = urljoin(self.task_plugin_builtins_endpoint, name) - return cast(dict[str, Any], requests.get(task_plugin_name_query).json()) - - def get_custom_task_plugin(self, name: str) -> dict[str, Any]: - """Gets a custom task plugin by its unique name. - - Args: - name: A unique string identifying a task plugin package within - dioptra_builtins collection. - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'taskPluginName': 'custom_poisoning_plugins', - 'collection': 'dioptra_custom', - 'modules': [ - '__init__.py', - 'attacks_poison.py', - 'data_tensorflow.py', - 'datasetup.py', - 'defenses_image_preprocessing.py', - 'defenses_training.py', - 'estimators_keras_classifiers.py', - 'registry_art.py', - 'tensorflow.py' - ] - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - task_plugin_name_query: str = urljoin(self.task_plugin_custom_endpoint, name) - return cast(dict[str, Any], requests.get(task_plugin_name_query).json()) - - def list_experiments(self) -> list[dict[str, Any]]: - """Gets a list of all registered experiments. - - Returns: - A list of responses detailing all experiments. - - Example:: - - [ - { - 'lastModified': '2023-04-24T20:20:27.315687', - 'experimentId': 1, - 'name': 'mnist', - 'createdOn': '2023-04-24T20:20:27.315687' - }, - ... - { - 'lastModified': '2023-06-22T13:42:35.379462', - 'experimentId': 10, - 'name': 'mnist_feature_squeezing', - 'createdOn': '2023-06-22T13:42:35.379462' - } - ] - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - return cast(list[dict[str, Any]], requests.get(self.experiment_endpoint).json()) - - def list_jobs(self) -> list[dict[str, Any]]: - """Gets a list of all submitted jobs. - - Returns: - A list of responses detailing all jobs. - - Example:: - - [ - { - 'mlflowRunId': None, - 'lastModified': '2023-04-24T20:54:30.722304', - 'experimentId': 2, - 'queueId': 2, - 'workflowUri': 's3://workflow/268a7620/workflows.tar.gz', - 'entryPoint': 'train', - 'dependsOn': None, - 'status': 'queued', - 'timeout': '1h', - 'jobId': 'a4c574dd-cbd1-43c9-9afe-17d69cd1c73d', - 'entryPointKwargs': '-P data_dir=/nfs/data/Mnist', - 'createdOn': '2023-04-24T20:54:30.722304' - }, - ... - ] - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. 
- """ - return cast(list[dict[str, Any]], requests.get(self.job_endpoint).json()) - - def list_queues(self) -> list[dict[str, Any]]: - """Gets a list of all registered queues. - - Returns: - A list of responses detailing all registered queues. - - Example:: - - [ - { - 'lastModified': '2023-04-24T20:53:09.801442', - 'name': 'tensorflow_cpu', - 'queueId': 1, - 'createdOn': '2023-04-24T20:53:09.801442' - }, - { - 'lastModified': '2023-04-24T20:53:09.824101', - 'name': 'tensorflow_gpu', - 'queueId': 2, - 'createdOn': '2023-04-24T20:53:09.824101' - }, - { - 'lastModified': '2023-04-24T20:53:09.867917', - 'name': 'pytorch_cpu', - 'queueId': 3, - 'createdOn': '2023-04-24T20:53:09.867917' - }, - { - 'lastModified': '2023-04-24T20:53:09.893451', - 'name': 'pytorch_gpu', - 'queueId': 4, - 'createdOn': '2023-04-24T20:53:09.893451' - } - ] - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - return cast(list[dict[str, Any]], requests.get(self.queue_endpoint).json()) - - def list_all_task_plugins(self) -> list[dict[str, Any]]: - """Gets a list of all registered builtin task plugins. - - Returns: - A list of responses detailing all plugins. - - Example:: - - [ - { - 'taskPluginName': 'artifacts', - 'collection': 'dioptra_builtins', - 'modules': ['__init__.py', 'mlflow.py', 'utils.py'] - }, - ... - { - 'taskPluginName': 'pixel_threshold', - 'collection': 'dioptra_custom', - 'modules': ['__init__.py', 'pixelthreshold.py'] - } - ] - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - - return cast( - list[dict[str, Any]], requests.get(self.task_plugin_endpoint).json() - ) - - def list_builtin_task_plugins(self) -> list[dict[str, Any]]: - """Gets a list of all registered builtin task plugins. - - Returns: - A list of responses detailing all builtin plugins. - - Example:: - - [ - { - 'taskPluginName': 'artifacts', - 'collection': 'dioptra_builtins', - 'modules': ['__init__.py', 'mlflow.py', 'utils.py'] - }, - ... - { - 'taskPluginName': 'backend_configs', - 'collection': 'dioptra_builtins', - 'modules': ['__init__.py', 'tensorflow.py'] - } - ] - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html for - more information on Dioptra's REST api. - """ - return cast( - list[dict[str, Any]], - requests.get(self.task_plugin_builtins_endpoint).json(), - ) - - def list_custom_task_plugins(self) -> list[dict[str, Any]]: - """Gets a list of all registered custom task plugins. - - Returns: - A list of responses detailing all custom plugins. - - Example:: - - [ - { - 'taskPluginName': 'model_inversion', - 'collection': 'dioptra_custom', - 'modules': ['__init__.py', 'modelinversion.py'] - }, - ... - { - 'taskPluginName': 'pixel_threshold', - 'collection': 'dioptra_custom', - 'modules': ['__init__.py', 'pixelthreshold.py'] - } - ] - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - return cast( - list[dict[str, Any]], requests.get(self.task_plugin_custom_endpoint).json() - ) - - def lock_queue(self, name: str) -> dict[str, Any]: - """Locks the queue (name reference) if it is unlocked. - - Args: - name: The name of the queue. - - Returns: - The Dioptra REST api's response. 
- - Example:: - - {'name': ['tensorflow_cpu'], 'status': 'Success'} - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - queue_name_query: str = urljoin(self.queue_endpoint, "name", name, "lock") - return cast(dict[str, Any], requests.put(queue_name_query).json()) - - def unlock_queue(self, name: str) -> dict[str, Any]: - """Removes the lock from the queue (name reference) if it exists. - - Args: - name: The name of the queue. - - Returns: - The Dioptra REST api's response. - - Example:: - - {'name': ['tensorflow_cpu'], 'status': 'Success'} - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - queue_name_query: str = urljoin(self.queue_endpoint, "name", name, "lock") - return cast(dict[str, Any], requests.delete(queue_name_query).json()) - - def register_experiment(self, name: str) -> dict[str, Any]: - """Creates a new experiment via an experiment registration form. - - Args: - name: The name to register as a new experiment. - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'lastModified': '2023-06-26T15:45:09.232878', - 'experimentId': 11, - 'name': 'experiment1234', - 'createdOn': '2023-06-26T15:45:09.232878' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - experiment_registration_form = {"name": name} - - response = requests.post( - self.experiment_endpoint, - json=experiment_registration_form, - ) - - return cast(dict[str, Any], response.json()) - - def register_queue(self, name: str = "tensorflow_cpu") -> dict[str, Any]: - """Creates a new queue via a queue registration form. - - Args: - name: The name to register as a new queue. Defaults to "tensorflow_cpu". - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'lastModified': '2023-06-26T15:48:47.662293', - 'name': 'queue', - 'queueId': 7, - 'createdOn': '2023-06-26T15:48:47.662293' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - queue_registration_form = {"name": name} - - response = requests.post( - self.queue_endpoint, - json=queue_registration_form, - ) - - return cast(dict[str, Any], response.json()) - - def submit_job( - self, - workflows_file: str | Path, - experiment_name: str, - entry_point: str, - entry_point_kwargs: str | None = None, - depends_on: str | None = None, - queue: str = "tensorflow_cpu", - timeout: str = "24h", - ) -> dict[str, Any]: - """Creates a new job via a job submission form with an attached file. - - Args: - workflows_file: A tarball archive or zip file containing, at a minimum, - a MLproject file and its associated entry point scripts. - experiment_name:The name of a registered experiment. - entry_point: Entrypoint name. - entry_point_kwargs: A string listing parameter values to pass to the - entry point for the job. The list of parameters is specified using the - following format: “-P param1=value1 -P param2=value2”. Defaults to None. - depends_on: A UUID for a previously submitted job to set as a dependency - for the current job. Defaults to None. - queue: Name of the queue the job is submitted to. Defaults to - "tensorflow_cpu". - timeout: The maximum alloted time for a job before it times out and is - stopped. Defaults to "24h". - - Returns: - The Dioptra REST api's response. 
- - Example:: - - { - 'createdOn': '2023-06-26T15:26:43.100093', - 'dependsOn': None, - 'entryPoint': 'train', - 'entryPointKwargs': '-P data_dir=/dioptra/data/Mnist', - 'experimentId': 10, - 'jobId': '4eb2305e-57c3-4867-a59f-1a1ecd2033d4', - 'lastModified': '2023-06-26T15:26:43.100093', - 'mlflowRunId': None, - 'queueId': 2, - 'status': 'queued', - 'timeout': '24h', - 'workflowUri': 's3://workflow/07d2c0a9/workflows.tar.gz' - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - job_form: dict[str, Any] = { - "experimentName": experiment_name, - "queue": queue, - "timeout": timeout, - "entryPoint": entry_point, - } - - if entry_point_kwargs is not None: - job_form["entryPointKwargs"] = entry_point_kwargs - - if depends_on is not None: - job_form["dependsOn"] = depends_on - - workflows_file = Path(workflows_file) - - with workflows_file.open("rb") as f: - job_files = {"workflow": (workflows_file.name, f)} - response = requests.post( - self.job_endpoint, - data=job_form, - files=job_files, - ) - - return cast(dict[str, Any], response.json()) - - def upload_custom_plugin_package( - self, - custom_plugin_name: str, - custom_plugin_file: str | Path, - collection: str = "dioptra_custom", - ) -> dict[str, Any]: - """Registers a new task plugin uploaded via the task plugin upload form. - - Args: - custom_plugin_name: Plugin name for the upload form. - custom_plugin_file: Path to custom plugin. - collection: Collection to upload the plugin to. Defaults to - "dioptra_custom". - - Returns: - The Dioptra REST api's response. - - Example:: - - { - 'taskPluginName': 'evaluation', - 'collection': 'dioptra_custom', - 'modules': [ - 'tensorflow.py', - 'import_keras.py', - '__init__.py' - ] - } - - Notes: - See https://pages.nist.gov/dioptra/user-guide/api-reference-restapi.html - for more information on Dioptra's REST api. - """ - plugin_upload_form = { - "taskPluginName": custom_plugin_name, - "collection": collection, - } - - custom_plugin_file = Path(custom_plugin_file) - - with custom_plugin_file.open("rb") as f: - custom_plugin_file_dict = {"taskPluginFile": (custom_plugin_file.name, f)} - response = requests.post( - self.task_plugin_endpoint, - data=plugin_upload_form, - files=custom_plugin_file_dict, - ) - - return cast(dict[str, Any], response.json()) diff --git a/src/dioptra/client/artifacts.py b/src/dioptra/client/artifacts.py new file mode 100644 index 000000000..b812fd854 --- /dev/null +++ b/src/dioptra/client/artifacts.py @@ -0,0 +1,165 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). 
The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, TypeVar + +from .base import CollectionClient, DioptraSession +from .snapshots import SnapshotsSubCollectionClient + +T = TypeVar("T") + + +class ArtifactsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /artifacts collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "artifacts" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the ArtifactsCollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + super().__init__(session) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + + @property + def snapshots(self) -> SnapshotsSubCollectionClient[T]: + """The client for retrieving artifact resource snapshots. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the artifact snapshots sub-collection. Below are examples of how HTTP + requests to this sub-collection translate into method calls for an + active Dioptra Python client called ``client``:: + + # GET /api/v1/artifacts/1/snapshots + client.artifacts.snapshots.get(1) + + # GET /api/v1/artifacts/1/snapshots/2 + client.artifacts.snapshots.get_by_id(1, snapshot_id=2) + """ + return self._snapshots + + def get( + self, + group_id: int | None = None, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get a list of artifacts. + + Args: + group_id: The group id the artifacts belong to. If None, return artifacts + from all groups that the user has access to. Optional, defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of artifacts to return in the paged + response. Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for artifacts using the Dioptra API's query language. + Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + if group_id is not None: + params["groupId"] = group_id + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, artifact_id: str | int) -> T: + """Get the artifact matching the provided id. + + Args: + artifact_id: The artifact id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(artifact_id)) + + def create( + self, group_id: int, job_id: str | int, uri: str, description: str | None = None + ) -> T: + """Creates an artifact. + + Args: + group_id: The id of the group that will own the artifact. + job_id: The id of the job that produced this artifact. + uri: The URI pointing to the location of the artifact. + description: The description of the new artifact. Optional, defaults to + None.
+ + Returns: + The response from the Dioptra API. + """ + json_ = { + "group": group_id, + "job": job_id, + "uri": uri, + } + + if description is not None: + json_["description"] = description + + return self._session.post(self.url, json_=json_) + + def modify_by_id(self, artifact_id: str | int, description: str | None) -> T: + """Modify the artifact matching the provided id. + + Args: + artifact_id: The artifact id, an integer. + description: The new description of the artifact. To remove the description, + pass None. + + Returns: + The response from the Dioptra API. + """ + json_: dict[str, Any] = {} + + if description is not None: + json_["description"] = description + + return self._session.put(self.url, str(artifact_id), json_=json_) diff --git a/src/dioptra/restapi/v1/tags/errors.py b/src/dioptra/client/auth.py similarity index 51% rename from src/dioptra/restapi/v1/tags/errors.py rename to src/dioptra/client/auth.py index 4ff36b234..0a1a6be4f 100644 --- a/src/dioptra/restapi/v1/tags/errors.py +++ b/src/dioptra/client/auth.py @@ -14,36 +14,45 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the tag endpoints.""" -from __future__ import annotations +from typing import ClassVar, TypeVar -from flask_restx import Api +from .base import CollectionClient +T = TypeVar("T") -class TagAlreadyExistsError(Exception): - """The tag name already exists.""" +class AuthCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /auth collection. -class TagDoesNotExistError(Exception): - """The requested tag does not exist.""" + Attributes: + name: The name of the collection. + """ + name: ClassVar[str] = "auth" -class TagSortError(Exception): - """The requested sortBy column is not a sortable field.""" + def login(self, username: str, password: str) -> T: + """Send a login request to the Dioptra API. + Args: + username: The username of the user. + password: The password of the user. -def register_error_handlers(api: Api) -> None: - @api.errorhandler(TagDoesNotExistError) - def handle_tag_does_not_exist_error(error): - return {"message": "Not Found - The requested tag does not exist"}, 404 + Returns: + The response from the Dioptra API. + """ + return self._session.post( + self.url, + "login", + json_={"username": username, "password": password}, + ) - @api.errorhandler(TagAlreadyExistsError) - def handle_tag_already_exists_error(error): - return {"message": "Bad Request - The tag name already exists."}, 400 + def logout(self, everywhere: bool = False) -> T: + """Send a logout request to the Dioptra API. - @api.errorhandler(TagSortError) - def handle_queue_sort_error(error): - return ( - {"message": "Bad Request - This column can not be sorted."}, - 400, - ) + Args: + everywhere: If True, log out from all sessions. + + Returns: + The response from the Dioptra API. + """ + return self._session.post(self.url, "logout", params={"everywhere": everywhere}) diff --git a/src/dioptra/client/base.py b/src/dioptra/client/base.py new file mode 100644 index 000000000..cb1467aba --- /dev/null +++ b/src/dioptra/client/base.py @@ -0,0 +1,496 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. 
Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from abc import ABC, abstractmethod +from pathlib import Path +from posixpath import join as urljoin +from typing import Any, ClassVar, Generic, Protocol, TypeVar + +T = TypeVar("T") + + +class DioptraClientError(Exception): + """Base class for client errors""" + + +class FieldsValidationError(DioptraClientError): + """Raised when one or more fields are invalid.""" + + +class APIConnectionError(DioptraClientError): + """Class for connection errors""" + + +class StatusCodeError(DioptraClientError): + """Class for status code errors""" + + +class JSONDecodeError(DioptraClientError): + """Class for JSON decode errors""" + + +class IllegalArgumentError(DioptraClientError): + """Class for illegal argument errors""" + + +class SubCollectionUrlError(DioptraClientError): + """Class for errors in the sub-collection URL""" + + +class DioptraRequestProtocol(Protocol): + """The interface for a request to the Dioptra API.""" + + @property + def method(self) -> str: + """The HTTP method used in the request.""" + ... # fmt: skip + + @property + def url(self) -> str: + """The URL the request was made to.""" + ... # fmt: skip + + +class DioptraResponseProtocol(Protocol): + """The interface for a response from the Dioptra API.""" + + @property + def request(self) -> DioptraRequestProtocol: + """The request that generated the response.""" + ... # fmt: skip + + @property + def status_code(self) -> int: + """The HTTP status code of the response.""" + ... # fmt: skip + + @property + def text(self) -> str: + """The response body as a string.""" + ... # fmt: skip + + def json(self) -> dict[str, Any]: + """Return the response body as a JSON-like Python dictionary. + + Returns: + The response body as a dictionary. + """ + ... # fmt: skip + + +class DioptraSession(ABC, Generic[T]): + """The interface for communicating with the Dioptra API.""" + + @property + @abstractmethod + def url(self) -> str: + """The base URL of the API endpoints.""" + raise NotImplementedError + + @abstractmethod + def connect(self) -> None: + """Connect to the API.""" + raise NotImplementedError + + @abstractmethod + def close(self) -> None: + """Close the connection to the API.""" + raise NotImplementedError + + @abstractmethod + def make_request( + self, + method_name: str, + url: str, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a request to the API. + + All response objects must implement the DioptraResponseProtocol interface. + + Args: + method_name: The HTTP method to use. Must be one of "get", "patch", "post", + "put", or "delete". + url: The URL of the API endpoint. + params: The query parameters to include in the request. 
Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def get(self, endpoint: str, *parts, params: dict[str, Any] | None = None) -> T: + """Make a GET request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def patch( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a PATCH request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def post( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a POST request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def delete( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a DELETE request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def put( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a PUT request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def download( + self, + endpoint: str, + *parts, + output_path: Path, + params: dict[str, Any] | None = None, + ) -> Path: + """Download a file from the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + output_path: The path where the downloaded file should be saved. + params: The query parameters to include in the request. Optional, defaults + to None. + + Returns: + The path to the downloaded file. + """ + raise NotImplementedError + + def _get( + self, endpoint: str, *parts, params: dict[str, Any] | None = None + ) -> DioptraResponseProtocol: + """Make a GET request to the API. 
+ + The response from this internal method always implements the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + + Returns: + A response object that implements the DioptraResponseProtocol interface. + """ + return self.make_request("get", self.build_url(endpoint, *parts), params=params) + + def _patch( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a PATCH request to the API. + + The response from this internal method always implements the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A response object that implements the DioptraResponseProtocol interface. + """ + return self.make_request( + "patch", self.build_url(endpoint, *parts), params=params, json_=json_ + ) + + def _post( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a POST request to the API. + + The response from this internal method always implements the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A response object that implements the DioptraResponseProtocol interface. + """ + return self.make_request( + "post", self.build_url(endpoint, *parts), params=params, json_=json_ + ) + + def _delete( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a DELETE request to the API. + + The response from this internal method always implements the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A response object that implements the DioptraResponseProtocol interface. + """ + return self.make_request( + "delete", self.build_url(endpoint, *parts), params=params, json_=json_ + ) + + def _put( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a PUT request to the API. + + The response from this internal method always implements the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A response object that implements the DioptraResponseProtocol interface. 
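+
+        Example (an illustrative sketch; the endpoint, id, and payload below are
+        hypothetical)::
+
+            response = self._put("experiments", "1", json_={"name": "new-name"})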
+ """ + return self.make_request( + "put", self.build_url(endpoint, *parts), params=params, json_=json_ + ) + + @staticmethod + def build_url(base: str, *parts) -> str: + """Build a URL from a base and one or more parts. + + Args: + base: The base URL. + *parts: The parts to join to the base URL. + + Returns: + The joined URL. + """ + return urljoin(base, *parts) + + +class CollectionClient(Generic[T]): + """The interface for an API collection client. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the CollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + self._session = session + + @property + def url(self) -> str: + """The URL of the API endpoint.""" + return self._session.build_url(self._session.url, self.name) + + +class SubCollectionClient(Generic[T]): + """The interface for an API sub-collection client. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] + + def __init__( + self, + session: DioptraSession[T], + root_collection: CollectionClient[T], + parent_sub_collections: list["SubCollectionClient[T]"] | None = None, + ) -> None: + """Initialize the SubCollectionClient instance. + + Args: + session: The Dioptra API session object. + root_collection: The client for the root collection that owns this + sub-collection. + parent_sub_collections: An ordered list of parent sub-collection clients + that own this sub-collection and are also owned by the root collection. + For example, a client for the hypothetical + /col/{id1}/subColA/{id2}/subColB sub-collection would list the client + for subColA as the parent sub-collection. + """ + self._session = session + self._root_collection = root_collection + self._parent_sub_collections: list["SubCollectionClient[T]"] = ( + parent_sub_collections or [] + ) + + def build_sub_collection_url(self, *resource_ids: str | int) -> str: + """Build a sub-collection URL owned by one or more parent resources. + + Args: + *resource_ids: The parent resource ids that own the sub-collection. + + Returns: + The joined sub-collection URL. + + Raises: + SubCollectionUrlError: If the number of resource ids does not match the + expected count. For example, a client for the hypothetical + /col/{id1}/subColA/{id2}/subColB sub-collection would expect 2 resource + ids. 
+ """ + # Running example for hypothetical URL: /col/{id1}/subColA/{id2}/subColB + self._validate_resource_ids_count(resource_ids) + # Builds the URL root (ex: /col/{id1}) + parent_url_parts: list[str] = [ + self._root_collection.url, + str(resource_ids[0]), + ] + + # Builds the parent sub-collection parts (ex: /subColA/{id2}) + for resource_id, parent_sub_collection in zip( + resource_ids[1:], self._parent_sub_collections + ): + parent_url_parts.extend([parent_sub_collection.name, str(resource_id)]) + + # Joins the root and parent parts with the sub-collection name + # (ex: /col/{id1}/subColA/{id2}/subColB) + return self._session.build_url(*parent_url_parts, self.name) + + def _validate_resource_ids_count(self, resource_ids: tuple[str | int, ...]) -> None: + num_resource_ids = len(resource_ids) + expected_count = len(self._parent_sub_collections) + 1 + if num_resource_ids != expected_count: + raise SubCollectionUrlError( + f"Invalid number of resource ids (reason: expected {expected_count}): " + f"{num_resource_ids}" + ) diff --git a/src/dioptra/client/client.py b/src/dioptra/client/client.py new file mode 100644 index 000000000..1f16d87fb --- /dev/null +++ b/src/dioptra/client/client.py @@ -0,0 +1,218 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +import os +from posixpath import join as urljoin +from typing import Any, Final, Generic, TypeVar + +from .artifacts import ArtifactsCollectionClient +from .auth import AuthCollectionClient +from .base import DioptraResponseProtocol, DioptraSession +from .entrypoints import EntrypointsCollectionClient +from .experiments import ExperimentsCollectionClient +from .groups import GroupsCollectionClient +from .jobs import JobsCollectionClient +from .models import ModelsCollectionClient +from .plugin_parameter_types import PluginParameterTypesCollectionClient +from .plugins import PluginsCollectionClient +from .queues import QueuesCollectionClient +from .tags import TagsCollectionClient +from .users import UsersCollectionClient +from .workflows import WorkflowsCollectionClient + +DIOPTRA_V1_ROOT: Final[str] = "api/v1" +ENV_DIOPTRA_API: Final[str] = "DIOPTRA_API" + +T = TypeVar("T") + + +class DioptraClient(Generic[T]): + """The Dioptra API client.""" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the DioptraClient instance. + + Args: + session: The Dioptra API session object. 
+ """ + self._session = session + self._users = UsersCollectionClient[T](session) + self._auth = AuthCollectionClient[T](session) + self._queues = QueuesCollectionClient[T](session) + self._tags = TagsCollectionClient[T](session) + self._groups = GroupsCollectionClient[T](session) + self._plugins = PluginsCollectionClient[T](session) + self._plugin_parameter_types = PluginParameterTypesCollectionClient[T](session) + self._experiments = ExperimentsCollectionClient[T](session) + self._jobs = JobsCollectionClient[T](session) + self._entrypoints = EntrypointsCollectionClient[T](session) + self._models = ModelsCollectionClient[T](session) + self._artifacts = ArtifactsCollectionClient[T](session) + self._workflows = WorkflowsCollectionClient[T](session) + + @property + def users(self) -> UsersCollectionClient[T]: + """The client for managing Dioptra's /users collection.""" + return self._users + + @property + def auth(self) -> AuthCollectionClient[T]: + """The client for managing Dioptra's /auth collection.""" + return self._auth + + @property + def queues(self) -> QueuesCollectionClient[T]: + """The client for managing Dioptra's /queues collection.""" + return self._queues + + @property + def tags(self) -> TagsCollectionClient[T]: + """The client for managing Dioptra's /tags collection.""" + return self._tags + + @property + def groups(self) -> GroupsCollectionClient[T]: + """The client for managing Dioptra's /groups collection.""" + return self._groups + + @property + def plugins(self) -> PluginsCollectionClient[T]: + """The client for managing Dioptra's /plugins collection.""" + return self._plugins + + @property + def plugin_parameter_types(self) -> PluginParameterTypesCollectionClient[T]: + """The client for managing Dioptra's /pluginParameterTypes collection.""" + return self._plugin_parameter_types + + @property + def experiments(self) -> ExperimentsCollectionClient[T]: + """The client for managing Dioptra's /experiments collection.""" + return self._experiments + + @property + def jobs(self) -> JobsCollectionClient[T]: + """The client for managing Dioptra's /jobs collection.""" + return self._jobs + + @property + def entrypoints(self) -> EntrypointsCollectionClient[T]: + """The client for managing Dioptra's /entrypoints collection.""" + return self._entrypoints + + @property + def models(self) -> ModelsCollectionClient[T]: + """The client for managing Dioptra's /models collection.""" + return self._models + + @property + def artifacts(self) -> ArtifactsCollectionClient[T]: + """The client for managing Dioptra's /artifacts collection.""" + return self._artifacts + + @property + def workflows(self) -> WorkflowsCollectionClient[T]: + """The client for managing Dioptra's /workflows collection.""" + return self._workflows + + def close(self) -> None: + """Close the client's connection to the API.""" + self._session.close() + + +def connect_response_dioptra_client( + address: str | None = None, +) -> DioptraClient[DioptraResponseProtocol]: + """Connect a client to the Dioptra API that returns response objects. + + This client always returns a response object regardless of the response status code. + It is the responsibility of the caller to check the status code and handle any + errors. + + Args: + address: The Dioptra web address. This is the same address used to access the + web GUI, e.g. "https://dioptra.example.org". Note that the + "/api/" suffix is omitted. If None, then the DIOPTRA_API + environment variable will be checked and used. + + Returns: + A Dioptra client. 
+ + Raises: + ValueError: If address is None and the DIOPTRA_API environment variable is not + set. + """ + from .sessions import DioptraRequestsSession + + return DioptraClient[DioptraResponseProtocol]( + session=DioptraRequestsSession(_build_api_address(address)) + ) + + +def connect_json_dioptra_client( + address: str | None = None, +) -> DioptraClient[dict[str, Any]]: + """Connect a client to the Dioptra API that returns JSON-like Python dictionaries. + + In contrast to the client that returns response objects, this client will raise an + exception for any non-2xx response status code. + + Args: + address: The Dioptra web address. This is the same address used to access the + web GUI, e.g. "https://dioptra.example.org". Note that the + "/api/" suffix is omitted. If None, then the DIOPTRA_API + environment variable will be checked and used. + + Returns: + A Dioptra client. + + Raises: + ValueError: If address is None and the DIOPTRA_API environment variable is not + set. + """ + from .sessions import DioptraRequestsSessionJson + + return DioptraClient[dict[str, Any]]( + session=DioptraRequestsSessionJson(_build_api_address(address)) + ) + + +def _build_api_address(address: str | None) -> str: + """Build the Dioptra API address. + + Args: + address: The Dioptra web address. This is the same address used to access the + web GUI, e.g. "https://dioptra.example.org". Note that the + "/api/" suffix is omitted. If None, then the DIOPTRA_API + environment variable will be checked and used. + + Returns: + The Dioptra API address. + + Raises: + ValueError: If address is None and the DIOPTRA_API environment variable is not + set. + """ + if address is not None: + return urljoin(address, DIOPTRA_V1_ROOT) + + if (dioptra_api := os.getenv(ENV_DIOPTRA_API)) is None: + raise ValueError( + f"The {ENV_DIOPTRA_API} environment variable must be set if the " + "address is not provided." + ) + + return urljoin(dioptra_api, DIOPTRA_V1_ROOT) diff --git a/src/dioptra/client/drafts.py b/src/dioptra/client/drafts.py new file mode 100644 index 000000000..d87c5052f --- /dev/null +++ b/src/dioptra/client/drafts.py @@ -0,0 +1,404 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +import warnings +from typing import Any, ClassVar, Generic, Protocol, TypeVar, cast + +from .base import ( + CollectionClient, + DioptraClientError, + DioptraSession, + FieldsValidationError, + SubCollectionClient, + SubCollectionUrlError, +) + +T = TypeVar("T") + + +class DraftFieldsValidationError(DioptraClientError): + """Raised when one or more draft fields are invalid.""" + + +class ValidateDraftFieldsProtocol(Protocol): + def __call__(self, json_: dict[str, Any]) -> dict[str, Any]: + ... # fmt: skip + + +def make_draft_fields_validator( + draft_fields: set[str], resource_name: str +) -> ValidateDraftFieldsProtocol: + """Create a function to validate the allowed draft fields. + + Args: + draft_fields: The allowed draft fields. + resource_name: The name of the resource the draft fields are for. + + Returns: + The function to validate the allowed draft fields. + """ + + def validate_draft_fields(json_: dict[str, Any]) -> dict[str, Any]: + """Validate the provided draft fields. + + Args: + json_: The draft fields to validate. + + Returns: + The validated draft fields. + + Raises: + DraftFieldsValidationError: If one or more draft fields are invalid or + missing. + """ + provided_fields = set(json_.keys()) + + if draft_fields != provided_fields: + invalid_fields = provided_fields - draft_fields + missing_fields = draft_fields - provided_fields + reason: list[str] = [] + + if invalid_fields: + reason.append(f"{invalid_fields} are invalid") + + if missing_fields: + reason.append(f"{missing_fields} are missing") + + raise DraftFieldsValidationError( + "Invalid or missing fields for resource draft " + f"(reason: {', '.join(reason)}): {resource_name}" + ) + + return json_ + + return validate_draft_fields + + +class NewResourceDraftsSubCollectionClient(Generic[T]): + """The client for managing a new resource drafts sub-collection. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] = "drafts" + + def __init__( + self, + session: DioptraSession[T], + validate_fields_fn: ValidateDraftFieldsProtocol, + root_collection: CollectionClient[T], + parent_sub_collections: list[SubCollectionClient[T]] | None = None, + ) -> None: + """Initialize the NewResourceDraftsSubCollectionClient instance. + + Args: + session: The Dioptra API session object. + validate_fields_fn: The function to validate the allowed draft fields. + root_collection: The client for the root collection that owns this + sub-collection. + parent_sub_collections: An ordered list of parent sub-collection clients + that own this sub-collection and are also owned by the root collection. + For example, a client for the hypothetical /col/{id1}/subColA/drafts + collection would list the client for subColA as the parent + sub-collection. + """ + self._session = session + self._validate_fields = validate_fields_fn + self._root_collection = root_collection + self._parent_sub_collections: list[SubCollectionClient[T]] = ( + parent_sub_collections or [] + ) + + def get( + self, + *resource_ids: str | int, + draft_type: str | None = None, + group_id: int | None = None, + index: int = 0, + page_length: int = 10, + ) -> T: + """Get the list of new resource drafts. + + Args: + *resource_ids: The parent resource ids that own the new resource drafts + sub-collection. + draft_type: The type of drafts to return: all, existing, or new. Optional, + defaults to None. + group_id: The group id the drafts belong to. 
If None, return drafts from all + groups that the user has access to. Optional, defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of drafts to return in the paged response. + Optional, defaults to 10. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if group_id is not None: + params["groupId"] = group_id + + if draft_type is not None: + params["draftType"] = draft_type + + return self._session.get( + self.build_sub_collection_url(*resource_ids), params=params + ) + + def get_by_id(self, *resource_ids: str | int, draft_id: int) -> T: + """Get a new resource draft by its id. + + Args: + *resource_ids: The parent resource ids that own the new resource drafts + sub-collection. + draft_id: The draft id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get( + self.build_sub_collection_url(*resource_ids), str(draft_id) + ) + + def create( + self, *resource_ids: str | int, group_id: int | None = None, **kwargs + ) -> T: + """Create a new resource draft. + + Args: + *resource_ids: The parent resource ids that own the new resource drafts + sub-collection. + group_id: The id for the group that will own the resource when the draft is + published. + **kwargs: The draft fields. + + Returns: + The response from the Dioptra API. + + Raises: + FieldsValidationError: If "group" is specified in kwargs or if group_id is + None and the client has no parent sub-collections. + DraftFieldsValidationError: If one or more draft fields are invalid or + missing. + """ + + if "group" in kwargs: + raise FieldsValidationError( + "Invalid argument (reason: keyword is reserved): group" + ) + + data: dict[str, Any] = ( + self._validate_group_id(group_id) | self._validate_fields(kwargs) + ) # fmt: skip + return self._session.post( + self.build_sub_collection_url(*resource_ids), json_=data + ) + + def modify(self, *resource_ids: str | int, draft_id: int, **kwargs) -> T: + """Modify the new resource draft matching the provided id. + + Args: + *resource_ids: The parent resource ids that own the new resource drafts + sub-collection. + draft_id: The draft id, an integer. + **kwargs: The draft fields to modify. + + Returns: + The response from the Dioptra API. + + Raises: + FieldsValidationError: If "draftId" is specified in kwargs. + DraftFieldsValidationError: If one or more draft fields are invalid or + missing. + """ + if "draftId" in kwargs: + raise FieldsValidationError( + "Invalid argument (reason: keyword is reserved): draftId" + ) + + return self._session.put( + self.build_sub_collection_url(*resource_ids), + str(draft_id), + json_=self._validate_fields(kwargs), + ) + + def delete(self, *resource_ids: str | int, draft_id: int) -> T: + """Delete the new resource draft matching the provided id. + + Args: + *resource_ids: The parent resource ids that own the new resource drafts + sub-collection. + draft_id: The draft id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete( + self.build_sub_collection_url(*resource_ids), str(draft_id) + ) + + def build_sub_collection_url(self, *resource_ids: str | int) -> str: + """Build a sub-collection URL owned by one or more parent resources. + + Args: + *resource_ids: The parent resource ids that own the sub-collection. + + Returns: + The joined sub-collection URL. 
+ + Raises: + SubCollectionUrlError: If the number of resource ids does not match the + expected count. For example, a client for the hypothetical + /col/{id1}/subColA/{id2}/subColB/drafts sub-collection would expect 2 + resource ids. + """ + self._validate_resource_ids_count(*resource_ids) + parent_url_parts: list[str] = [self._root_collection.url] + + for resource_id, parent_sub_collection in zip( + resource_ids, self._parent_sub_collections + ): + parent_url_parts.extend([str(resource_id), parent_sub_collection.name]) + + return self._session.build_url(*parent_url_parts, self.name) + + def _validate_group_id(self, group_id: int | None) -> dict[str, Any]: + if not self._parent_sub_collections: + if group_id is None: + raise FieldsValidationError( + "Invalid argument (reason: argument cannot be None): group_id" + ) + + return {"group": group_id} + + if group_id is not None: + warnings.warn( + '"group_id" argument ignored (reason: creating draft for ' + "sub-resource where group is known)", + stacklevel=2, + ) + + return cast(dict[str, Any], {}) + + def _validate_resource_ids_count(self, *resource_ids: str | int) -> None: + num_resource_ids = len(resource_ids) + expected_count = len(self._parent_sub_collections) + if num_resource_ids != expected_count: + raise SubCollectionUrlError( + f"Invalid number of resource ids (reason: expected {expected_count}): " + f"{num_resource_ids}" + ) + + +class ModifyResourceDraftsSubCollectionClient(SubCollectionClient[T]): + """The client for managing a resource modification drafts sub-collection. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] = "draft" + + def __init__( + self, + session: DioptraSession[T], + validate_fields_fn: ValidateDraftFieldsProtocol, + root_collection: CollectionClient[T], + parent_sub_collections: list[SubCollectionClient[T]] | None = None, + ) -> None: + """Initialize the ModifyResourceDraftsSubCollectionClient instance. + + Args: + session: The Dioptra API session object. + validate_fields_fn: The function to validate the allowed draft fields. + root_collection: The client for the root collection that owns this + sub-collection. + parent_sub_collections: An ordered list of parent sub-collection clients + that own this sub-collection and are also owned by the root collection. + For example, a client for the hypothetical + /col/{id1}/subColA/{id2}/draft collection would list the client for + subColA as the parent sub-collection. + """ + super().__init__( + session, + root_collection=root_collection, + parent_sub_collections=parent_sub_collections, + ) + self._validate_fields = validate_fields_fn + + def get_by_id(self, *resource_ids: str | int) -> T: + """Get a resource modification draft. + + Args: + *resource_ids: The parent resource ids that own the sub-collection. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.build_sub_collection_url(*resource_ids)) + + def create(self, *resource_ids: str | int, **kwargs) -> T: + """Create a resource modification draft. + + Args: + *resource_ids: The parent resource ids that own the sub-collection. + **kwargs: The draft fields. + + Returns: + The response from the Dioptra API. + + Raises: + DraftFieldsValidationError: If one or more draft fields are invalid or + missing. + """ + return self._session.post( + self.build_sub_collection_url(*resource_ids), + json_=self._validate_fields(kwargs), + ) + + def modify(self, *resource_ids: str | int, **kwargs) -> T: + """Modify a resource modification draft. 
+ + Args: + *resource_ids: The parent resource ids that own the sub-collection. + **kwargs: The draft fields to modify. + + Returns: + The response from the Dioptra API. + + Raises: + DraftFieldsValidationError: If one or more draft fields are invalid or + missing. + """ + return self._session.put( + self.build_sub_collection_url(*resource_ids), + json_=self._validate_fields(kwargs), + ) + + def delete(self, *resource_ids: str | int) -> T: + """Delete a resource modification draft. + + Args: + *resource_ids: The parent resource ids that own the sub-collection. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.build_sub_collection_url(*resource_ids)) diff --git a/src/dioptra/client/entrypoints.py b/src/dioptra/client/entrypoints.py new file mode 100644 index 000000000..23459fb78 --- /dev/null +++ b/src/dioptra/client/entrypoints.py @@ -0,0 +1,571 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, Final, TypeVar + +from .base import ( + CollectionClient, + DioptraClientError, + DioptraSession, + SubCollectionClient, +) +from .drafts import ( + ModifyResourceDraftsSubCollectionClient, + NewResourceDraftsSubCollectionClient, + make_draft_fields_validator, +) +from .snapshots import SnapshotsSubCollectionClient +from .tags import TagsSubCollectionClient + +DRAFT_FIELDS: Final[set[str]] = { + "name", + "description", + "taskGraph", + "parameters", + "queues", + "plugins", +} + +T = TypeVar("T") + + +class EntrypointPluginsSubCollectionClient(SubCollectionClient[T]): + """The client for managing Dioptra's /entrypoints/{id}/plugins sub-collection. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] = "plugins" + + def __init__( + self, + session: DioptraSession[T], + root_collection: CollectionClient[T], + parent_sub_collections: list["SubCollectionClient[T]"] | None = None, + ) -> None: + """Initialize the EntrypointPluginsSubCollectionClient instance. + + Args: + session: The Dioptra API session object. + root_collection: The client for the root collection that owns this + sub-collection. + parent_sub_collections: Unused in this client, must be None. + """ + if parent_sub_collections is not None: + raise DioptraClientError( + "The parent_sub_collections argument must be None for this client." 
+            )
+
+        super().__init__(
+            session=session,
+            root_collection=root_collection,
+            parent_sub_collections=parent_sub_collections,
+        )
+
+    def get(self, entrypoint_id: int | str) -> T:
+        """Get a list of plugins added to the entrypoint.
+
+        Args:
+            entrypoint_id: The entrypoint id, an integer.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(self.build_sub_collection_url(entrypoint_id))
+
+    def get_by_id(self, entrypoint_id: str | int, plugin_id: str | int) -> T:
+        """Get the entrypoint plugin matching the provided id.
+
+        Args:
+            entrypoint_id: The entrypoint id, an integer.
+            plugin_id: The id for the plugin to retrieve.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(
+            self.build_sub_collection_url(entrypoint_id), str(plugin_id)
+        )
+
+    def create(
+        self,
+        entrypoint_id: str | int,
+        plugin_ids: list[int],
+    ) -> T:
+        """Adds one or more plugins to the entrypoint.
+
+        If a plugin id matches a plugin that is already attached to the entrypoint,
+        then the entrypoint will update the plugin to the latest version.
+
+        Args:
+            entrypoint_id: The entrypoint id, an integer.
+            plugin_ids: A list of plugin ids that will be registered to the entrypoint.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        json_ = {"plugins": plugin_ids}
+        return self._session.post(
+            self.build_sub_collection_url(entrypoint_id), json_=json_
+        )
+
+    def delete_by_id(self, entrypoint_id: str | int, plugin_id: str | int) -> T:
+        """Remove a plugin from the entrypoint.
+
+        Args:
+            entrypoint_id: The entrypoint id, an integer.
+            plugin_id: The id for the plugin that will be removed.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.delete(
+            self.build_sub_collection_url(entrypoint_id), str(plugin_id)
+        )
+
+
+class EntrypointQueuesSubCollectionClient(SubCollectionClient[T]):
+    """The client for managing Dioptra's /entrypoints/{id}/queues sub-collection.
+
+    Attributes:
+        name: The name of the sub-collection.
+    """
+
+    name: ClassVar[str] = "queues"
+
+    def __init__(
+        self,
+        session: DioptraSession[T],
+        root_collection: CollectionClient[T],
+        parent_sub_collections: list["SubCollectionClient[T]"] | None = None,
+    ) -> None:
+        """Initialize the EntrypointQueuesSubCollectionClient instance.
+
+        Args:
+            session: The Dioptra API session object.
+            root_collection: The client for the root collection that owns this
+                sub-collection.
+            parent_sub_collections: Unused in this client, must be None.
+        """
+        if parent_sub_collections is not None:
+            raise DioptraClientError(
+                "The parent_sub_collections argument must be None for this client."
+            )
+
+        super().__init__(
+            session=session,
+            root_collection=root_collection,
+            parent_sub_collections=parent_sub_collections,
+        )
+
+    def get(self, entrypoint_id: int | str) -> T:
+        """Get a list of queues added to the entrypoint.
+
+        Args:
+            entrypoint_id: The entrypoint id, an integer.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(self.build_sub_collection_url(entrypoint_id))
+
+    def create(
+        self,
+        entrypoint_id: str | int,
+        queue_ids: list[int],
+    ) -> T:
+        """Adds one or more queues to the entrypoint.
+
+        Args:
+            entrypoint_id: The entrypoint id, an integer.
+            queue_ids: A list of queue ids that will be registered to the entrypoint.
+
+        Returns:
+            The response from the Dioptra API.
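+
+        Example (a sketch with hypothetical ids; assumes an authenticated client)::
+
+            # POST /api/v1/entrypoints/1/queues
+            client.entrypoints.queues.create(1, queue_ids=[2, 3])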
+ """ + json_ = {"ids": queue_ids} + return self._session.post( + self.build_sub_collection_url(entrypoint_id), json_=json_ + ) + + def delete(self, entrypoint_id: str | int) -> T: + """Remove all queues from the entrypoint. + + Args: + entrypoint_id: The entrypoint id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.build_sub_collection_url(entrypoint_id)) + + def modify_by_id( + self, + entrypoint_id: str | int, + queue_ids: list[int], + ) -> T: + """Replaces the entrypoint's full list of queues. + + If an empty list is provided, then all queues will be removed from the + entrypoint. + + Args: + entrypoint_id: The entrypoint id, an integer. + queue_ids: A list of queue ids that will replace the current list of + entrypoint queues. + + Returns: + The response from the Dioptra API. + """ + json_ = {"ids": queue_ids} + return self._session.put( + self.build_sub_collection_url(entrypoint_id), json_=json_ + ) + + def delete_by_id(self, entrypoint_id: str | int, queue_id: str | int) -> T: + """Remove a queue from the entrypoint. + + Args: + entrypoint_id: The entrypoint id, an integer. + queue_id: The id for the queue that will be removed. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete( + self.build_sub_collection_url(entrypoint_id), str(queue_id) + ) + + +class EntrypointsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /entrypoints collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "entrypoints" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the EntrypointsCollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + super().__init__(session) + self._plugins = EntrypointPluginsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._queues = EntrypointQueuesSubCollectionClient[T]( + session=session, root_collection=self + ) + self._new_resource_drafts = NewResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._modify_resource_drafts = ModifyResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._tags = TagsSubCollectionClient[T](session=session, root_collection=self) + + @property + def plugins(self) -> EntrypointPluginsSubCollectionClient[T]: + """The client for managing the plugins sub-collection.""" + return self._plugins + + @property + def queues(self) -> EntrypointQueuesSubCollectionClient[T]: + """The client for managing the queues sub-collection.""" + return self._queues + + @property + def new_resource_drafts(self) -> NewResourceDraftsSubCollectionClient[T]: + """The client for managing the new entrypoint drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the new entrypoint drafts sub-collection. 
+        Below are examples of how HTTP requests to this sub-collection translate
+        into method calls for an active Dioptra Python client called ``client``::
+
+            # GET /api/v1/entrypoints/drafts
+            client.entrypoints.new_resource_drafts.get()
+
+            # GET /api/v1/entrypoints/drafts/1
+            client.entrypoints.new_resource_drafts.get_by_id(draft_id=1)
+
+            # PUT /api/v1/entrypoints/drafts/1
+            client.entrypoints.new_resource_drafts.modify(
+                draft_id=1, name="new-name", description="new-description"
+            )
+
+            # POST /api/v1/entrypoints/drafts
+            client.entrypoints.new_resource_drafts.create(
+                group_id=1, name="name", description="description"
+            )
+
+            # DELETE /api/v1/entrypoints/drafts/1
+            client.entrypoints.new_resource_drafts.delete(draft_id=1)
+        """
+        return self._new_resource_drafts
+
+    @property
+    def modify_resource_drafts(self) -> ModifyResourceDraftsSubCollectionClient[T]:
+        """The client for managing the entrypoint modification drafts sub-collection.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource
+        ids that own the entrypoint modification drafts sub-collection. Below are
+        examples of how HTTP requests to this sub-collection translate into method
+        calls for an active Dioptra Python client called ``client``::
+
+            # GET /api/v1/entrypoints/1/draft
+            client.entrypoints.modify_resource_drafts.get_by_id(1)
+
+            # PUT /api/v1/entrypoints/1/draft
+            client.entrypoints.modify_resource_drafts.modify(
+                1, name="new-name", description="new-description"
+            )
+
+            # POST /api/v1/entrypoints/1/draft
+            client.entrypoints.modify_resource_drafts.create(
+                1, name="name", description="description"
+            )
+
+            # DELETE /api/v1/entrypoints/1/draft
+            client.entrypoints.modify_resource_drafts.delete(1)
+        """
+        return self._modify_resource_drafts
+
+    @property
+    def snapshots(self) -> SnapshotsSubCollectionClient[T]:
+        """The client for retrieving entrypoint resource snapshots.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource
+        ids that own the entrypoint snapshots sub-collection. Below are examples of
+        how HTTP requests to this sub-collection translate into method calls for an
+        active Dioptra Python client called ``client``::
+
+            # GET /api/v1/entrypoints/1/snapshots
+            client.entrypoints.snapshots.get(1)
+
+            # GET /api/v1/entrypoints/1/snapshots/2
+            client.entrypoints.snapshots.get_by_id(1, snapshot_id=2)
+        """
+        return self._snapshots
+
+    @property
+    def tags(self) -> TagsSubCollectionClient[T]:
+        """
+        The client for managing the tags sub-collection owned by the /entrypoints
+        collection.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource
+        ids that own the tags sub-collection.
+        Below are examples of how HTTP requests to this sub-collection translate
+        into method calls for an active Dioptra Python client called ``client``::
+
+            # GET /api/v1/entrypoints/1/tags
+            client.entrypoints.tags.get(1)
+
+            # PUT /api/v1/entrypoints/1/tags
+            client.entrypoints.tags.modify(1, ids=[2, 3])
+
+            # POST /api/v1/entrypoints/1/tags
+            client.entrypoints.tags.append(1, ids=[2, 3])
+
+            # DELETE /api/v1/entrypoints/1/tags/3
+            client.entrypoints.tags.remove(1, tag_id=3)
+
+            # DELETE /api/v1/entrypoints/1/tags
+            client.entrypoints.tags.remove(1)
+        """
+        return self._tags
+
+    def get(
+        self,
+        group_id: int | None = None,
+        index: int = 0,
+        page_length: int = 10,
+        sort_by: str | None = None,
+        descending: bool | None = None,
+        search: str | None = None,
+    ) -> T:
+        """Get a list of entrypoints.
+
+        Args:
+            group_id: The group id the entrypoints belong to. If None, return
+                entrypoints from all groups that the user has access to. Optional,
+                defaults to None.
+            index: The paging index. Optional, defaults to 0.
+            page_length: The maximum number of entrypoints to return in the paged
+                response. Optional, defaults to 10.
+            sort_by: The field to use to sort the returned list. Optional, defaults to
+                None.
+            descending: Sort the returned list in descending order. Optional, defaults
+                to None.
+            search: Search for entrypoints using the Dioptra API's query language.
+                Optional, defaults to None.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        params: dict[str, Any] = {
+            "index": index,
+            "pageLength": page_length,
+        }
+
+        if sort_by is not None:
+            params["sortBy"] = sort_by
+
+        if descending is not None:
+            params["descending"] = descending
+
+        if search is not None:
+            params["search"] = search
+
+        if group_id is not None:
+            params["groupId"] = group_id
+
+        return self._session.get(
+            self.url,
+            params=params,
+        )
+
+    def get_by_id(self, entrypoint_id: str | int) -> T:
+        """Get the entrypoint matching the provided id.
+
+        Args:
+            entrypoint_id: The entrypoint id, an integer.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(self.url, str(entrypoint_id))
+
+    def create(
+        self,
+        group_id: int,
+        name: str,
+        task_graph: str,
+        description: str | None = None,
+        parameters: list[dict[str, Any]] | None = None,
+        queues: list[int] | None = None,
+        plugins: list[int] | None = None,
+    ) -> T:
+        """Creates an entrypoint.
+
+        Args:
+            group_id: The id of the group that will own the entrypoint.
+            name: The name of the new entrypoint.
+            task_graph: The task graph for the new entrypoint as a YAML-formatted
+                string.
+            description: The description of the new entrypoint. Optional, defaults to
+                None.
+            parameters: The list of parameters for the new entrypoint. Optional,
+                defaults to None.
+            queues: A list of queue ids to associate with the new entrypoint. Optional,
+                defaults to None.
+            plugins: A list of plugin ids to associate with the new entrypoint.
+                Optional, defaults to None.
+
+        Returns:
+            The response from the Dioptra API.
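+
+        Example (a sketch; the ids, names, and task graph shown are illustrative
+        placeholders, not a prescribed format)::
+
+            client.entrypoints.create(
+                group_id=1,
+                name="train-classifier",
+                task_graph="step_1:\n  my_plugin_task: $param_1\n",
+                queues=[2],
+                plugins=[3],
+            )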
+ """ + json_: dict[str, Any] = { + "group": group_id, + "name": name, + "taskGraph": task_graph, + } + + if description is not None: + json_["description"] = description + + if parameters is not None: + json_["parameters"] = parameters + + if queues is not None: + json_["queues"] = queues + + if plugins is not None: + json_["plugins"] = plugins + + return self._session.post(self.url, json_=json_) + + def modify_by_id( + self, + entrypoint_id: str | int, + name: str, + task_graph: str, + description: str | None, + parameters: list[dict[str, Any]] | None, + queues: list[int] | None, + ) -> T: + """Modify the entrypoint matching the provided id. + + Args: + entrypoint_id: The entrypoint id, an integer. + name: The new name of the entrypoint. + task_graph: The new task graph for the entrypoint as a YAML-formatted + string. + description: The new description of the entrypoint. To remove the + description, pass None. + parameters: The new list of parameters for the entrypoint. To remove all + parameters, pass None. + queues: The new list of queue ids to associate with the entrypoint. To + remove all associated queues, pass None. + + Returns: + The response from the Dioptra API. + """ + json_: dict[str, Any] = {"name": name, "taskGraph": task_graph} + + if description is not None: + json_["description"] = description + + if parameters is not None: + json_["parameters"] = parameters + + if queues is not None: + json_["queues"] = queues + + return self._session.put(self.url, str(entrypoint_id), json_=json_) + + def delete_by_id(self, entrypoint_id: str | int) -> T: + """Delete the entrypoint matching the provided id. + + Args: + entrypoint_id: The entrypoint id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.url, str(entrypoint_id)) diff --git a/src/dioptra/client/experiments.py b/src/dioptra/client/experiments.py new file mode 100644 index 000000000..605559e8b --- /dev/null +++ b/src/dioptra/client/experiments.py @@ -0,0 +1,704 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, Final, TypeVar + +from .base import ( + CollectionClient, + DioptraClientError, + DioptraSession, + SubCollectionClient, +) +from .drafts import ( + ModifyResourceDraftsSubCollectionClient, + NewResourceDraftsSubCollectionClient, + make_draft_fields_validator, +) +from .snapshots import SnapshotsSubCollectionClient +from .tags import TagsSubCollectionClient + +ARTIFACTS: Final[str] = "artifacts" +MLFLOW_RUN: Final[str] = "mlflowRun" +STATUS: Final[str] = "status" + +DRAFT_FIELDS: Final[set[str]] = {"name", "description", "entrypoints"} + +T = TypeVar("T") + + +class ExperimentEntrypointsSubCollectionClient(SubCollectionClient[T]): + """The client for managing Dioptra's /experiments/{id}/entrypoints sub-collection. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] = "entrypoints" + + def __init__( + self, + session: DioptraSession[T], + root_collection: CollectionClient[T], + parent_sub_collections: list["SubCollectionClient[T]"] | None = None, + ) -> None: + """Initialize the ExperimentEntrypointsSubCollectionClient instance. + + Args: + session: The Dioptra API session object. + root_collection: The client for the root collection that owns this + sub-collection. + parent_sub_collections: Unused in this client, must be None. + """ + if parent_sub_collections is not None: + raise DioptraClientError( + "The parent_sub_collections argument must be None for this client." + ) + + super().__init__( + session=session, + root_collection=root_collection, + parent_sub_collections=parent_sub_collections, + ) + + def get(self, experiment_id: int | str) -> T: + """Get a list of entrypoints added to the experiment. + + Args: + experiment_id: The experiment id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.build_sub_collection_url(experiment_id)) + + def create( + self, + experiment_id: str | int, + entrypoint_ids: list[int], + ) -> T: + """Adds one or more entrypoints to the experiment. + + If an entrypoint id matches an entrypoint that is already attached to the + experiment, then the experiment will update the entrypoint to the latest + version. + + Args: + experiment_id: The experiment id, an integer. + entrypoint_ids: A list of entrypoint ids that will be registered to the + experiment. + + Returns: + The response from the Dioptra API. + """ + json_ = {"ids": entrypoint_ids} + return self._session.post( + self.build_sub_collection_url(experiment_id), json_=json_ + ) + + def delete(self, experiment_id: str | int) -> T: + """Remove all entrypoints from the experiment. + + Args: + experiment_id: The experiment id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.build_sub_collection_url(experiment_id)) + + def modify_by_id( + self, + experiment_id: str | int, + entrypoint_ids: list[int], + ) -> T: + """Replaces the experiment's full list of entrypoints. + + If an entrypoint id matches an entrypoint that is already attached to the + experiment, then the experiment will update the entrypoint to the latest + version. If an empty list is provided, then all entrypoints will be removed from + the experiment. + + Args: + experiment_id: The experiment id, an integer. + entrypoint_ids: A list of entrypoint ids that will replace the current list + of experiment entrypoints. + + Returns: + The response from the Dioptra API. 
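+
+        Example (a sketch with hypothetical ids)::
+
+            # PUT /api/v1/experiments/1/entrypoints
+            client.experiments.entrypoints.modify_by_id(1, entrypoint_ids=[2, 3])
+
+            # Passing an empty list removes all entrypoints from the experiment.
+            client.experiments.entrypoints.modify_by_id(1, entrypoint_ids=[])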
+ """ + json_ = {"ids": entrypoint_ids} + return self._session.put( + self.build_sub_collection_url(experiment_id), json_=json_ + ) + + def delete_by_id(self, experiment_id: str | int, entrypoint_id: str | int) -> T: + """Remove an entrypoint from the experiment. + + Args: + experiment_id: The experiment id, an integer. + entrypoint_id: The id for the entrypoint that will be removed. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete( + self.build_sub_collection_url(experiment_id), str(entrypoint_id) + ) + + +class ExperimentJobsSubCollectionClient(SubCollectionClient[T]): + """The client for managing Dioptra's /experiments/{id}/jobs sub-collection. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] = "jobs" + + def __init__( + self, + session: DioptraSession[T], + root_collection: CollectionClient[T], + parent_sub_collections: list["SubCollectionClient[T]"] | None = None, + ) -> None: + """Initialize the ExperimentJobsSubCollectionClient instance. + + Args: + session: The Dioptra API session object. + root_collection: The client for the root collection that owns this + sub-collection. + parent_sub_collections: Unused in this client, must be None. + """ + if parent_sub_collections is not None: + raise DioptraClientError( + "The parent_sub_collections argument must be None for this client." + ) + + super().__init__( + session=session, + root_collection=root_collection, + parent_sub_collections=parent_sub_collections, + ) + + def get( + self, + experiment_id: str | int, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get an experiment's jobs. + + Args: + experiment_id: The experiment id, an integer. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of jobs to return in the paged + response. Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for models using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + return self._session.get( + self.build_sub_collection_url(experiment_id), params=params + ) + + def get_by_id(self, experiment_id: str | int, job_id: str | int) -> T: + """Get a specific job from an experiment. + + Args: + experiment_id: The experiment id, an integer. + job_id: The job id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get( + self.build_sub_collection_url(experiment_id), + str(job_id), + ) + + def create( + self, + experiment_id: str | int, + entrypoint_id: int, + queue_id: int, + values: dict[str, Any] | None = None, + timeout: str | None = None, + description: str | None = None, + ) -> T: + """Creates a job for an experiment. + + Args: + experiment_id: The experiment id, an integer. + entrypoint_id: The id for the entrypoint that the job will run. + queue_id: The id for the queue that will execute the job. + values: A dictionary of keyword arguments to pass to the entrypoint that + parameterize the job. 
+            timeout: The maximum allotted time for a job before it times out and is
+                stopped. If omitted, the job timeout will use the default set in the
+                API.
+            description: The description for the job. Optional, defaults to None.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        json_: dict[str, Any] = {"entrypoint": entrypoint_id, "queue": queue_id}
+
+        if values is not None:
+            json_["values"] = values
+
+        if timeout is not None:
+            json_["timeout"] = timeout
+
+        if description is not None:
+            json_["description"] = description
+
+        return self._session.post(
+            self.build_sub_collection_url(experiment_id), json_=json_
+        )
+
+    def delete_by_id(self, experiment_id: str | int, job_id: str | int) -> T:
+        """Delete a job from the experiment.
+
+        Args:
+            experiment_id: The experiment id, an integer.
+            job_id: The id for the job that will be deleted.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.delete(
+            self.build_sub_collection_url(experiment_id), str(job_id)
+        )
+
+    def create_artifact(
+        self,
+        experiment_id: str | int,
+        job_id: str | int,
+        uri: str,
+        description: str | None = None,
+    ) -> T:
+        """Creates a job artifact for an experiment.
+
+        Args:
+            experiment_id: The experiment id, an integer.
+            job_id: The id of the job that produced this artifact.
+            uri: The URI pointing to the location of the artifact.
+            description: The description of the new artifact. Optional, defaults to
+                None.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        json_ = {"uri": uri}
+
+        if description is not None:
+            json_["description"] = description
+
+        return self._session.post(
+            self.build_sub_collection_url(experiment_id),
+            str(job_id),
+            ARTIFACTS,
+            json_=json_,
+        )
+
+    def get_mlflow_run_id(self, experiment_id: str | int, job_id: str | int) -> T:
+        """Gets the MLflow run id for an experiment's job.
+
+        Args:
+            experiment_id: The experiment id, an integer.
+            job_id: The job id, an integer.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(
+            self.build_sub_collection_url(experiment_id), str(job_id), MLFLOW_RUN
+        )
+
+    def set_mlflow_run_id(
+        self, experiment_id: str | int, job_id: str | int, mlflow_run_id: str
+    ) -> T:
+        """Sets the MLflow run id for an experiment's job.
+
+        Args:
+            experiment_id: The experiment id, an integer.
+            job_id: The job id, an integer.
+            mlflow_run_id: The UUID, as a string, for the associated MLflow run.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        json_ = {
+            "mlflowRunId": mlflow_run_id,
+        }
+        return self._session.post(
+            self.build_sub_collection_url(experiment_id),
+            str(job_id),
+            MLFLOW_RUN,
+            json_=json_,
+        )
+
+    def get_status(self, experiment_id: str | int, job_id: str | int) -> T:
+        """Gets the status for an experiment's job.
+
+        Args:
+            experiment_id: The experiment id, an integer.
+            job_id: The job id, an integer.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(
+            self.build_sub_collection_url(experiment_id), str(job_id), STATUS
+        )
+
+    def set_status(self, experiment_id: str | int, job_id: str | int, status: str) -> T:
+        """Sets the status for an experiment's job.
+
+        Args:
+            experiment_id: The experiment id, an integer.
+            job_id: The job id, an integer.
+            status: The new status for the job. The allowed values are: queued,
+                started, deferred, finished, failed.
+
+        Returns:
+            The response from the Dioptra API.
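+
+        Example (a sketch with hypothetical ids)::
+
+            # PUT /api/v1/experiments/1/jobs/2/status
+            client.experiments.jobs.set_status(1, 2, status="finished")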
+ """ + json_ = {"status": status} + return self._session.put( + self.build_sub_collection_url(experiment_id), + str(job_id), + STATUS, + json_=json_, + ) + + +class ExperimentsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /experiments collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "experiments" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the ExperimentsCollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + super().__init__(session) + self._entrypoints = ExperimentEntrypointsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._jobs = ExperimentJobsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._new_resource_drafts = NewResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._modify_resource_drafts = ModifyResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._tags = TagsSubCollectionClient[T](session=session, root_collection=self) + + @property + def entrypoints(self) -> ExperimentEntrypointsSubCollectionClient[T]: + """The client for managing the entrypoints sub-collection.""" + return self._entrypoints + + @property + def jobs(self) -> ExperimentJobsSubCollectionClient[T]: + """The client for managing the jobs sub-collection.""" + return self._jobs + + @property + def new_resource_drafts(self) -> NewResourceDraftsSubCollectionClient[T]: + """The client for managing the new experiment drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the new experiment drafts sub-collection. Below are examples of how + HTTP requests to this sub-collection translate into method calls for an active + Python Dioptra Python client called ``client``:: + + # GET /api/v1/experiments/drafts + client.experiments.new_resource_drafts.get() + + # GET /api/v1/experiments/drafts/1 + client.experiments.new_resource_drafts.get_by_id(draft_id=1) + + # PUT /api/v1/experiments/drafts/1 + client.experiments.new_resource_drafts.modify( + draft_id=1, name="new-name", description="new-description" + ) + + # POST /api/v1/experiments/drafts + client.experiments.new_resource_drafts.create( + group_id=1, name="name", description="description" + ) + + # DELETE /api/v1/experiments/drafts/1 + client.experiments.new_resource_drafts.delete(draft_id=1) + """ + return self._new_resource_drafts + + @property + def modify_resource_drafts(self) -> ModifyResourceDraftsSubCollectionClient[T]: + """The client for managing the experiment modification drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the experiment modification drafts sub-collection. 
Below are examples + of how HTTP requests to this sub-collection translate into method calls for an + active Python Dioptra Python client called ``client``:: + + # GET /api/v1/experiments/1/draft + client.experiments.modify_resource_drafts.get_by_id(1) + + # PUT /api/v1/experiments/1/draft + client.experiments.modify_resource_drafts.modify( + 1, name="new-name", description="new-description" + ) + + # POST /api/v1/experiments/1/draft + client.experiments.modify_resource_drafts.create( + 1, name="name", description="description" + ) + + # DELETE /api/v1/experiments/1/draft + client.experiments.modify_resource_drafts.delete(1) + """ + return self._modify_resource_drafts + + @property + def snapshots(self) -> SnapshotsSubCollectionClient[T]: + """The client for retrieving experiment resource snapshots. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the experiment snapshots sub-collection. Below are examples of how HTTP + requests to this sub-collection translate into method calls for an active Python + Dioptra Python client called ``client``:: + + # GET /api/v1/experiments/1/snapshots + client.experiments.snapshots.get(1) + + # GET /api/v1/experiments/1/snapshots/2 + client.experiments.snapshots.get_by_id(1, snapshot_id=2) + """ + return self._snapshots + + @property + def tags(self) -> TagsSubCollectionClient[T]: + """ + The client for managing the tags sub-collection owned by the /experiments + collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the tags sub-collection. Below are examples of how HTTP requests to + this sub-collection translate into method calls for an active Python Dioptra + Python client called ``client``:: + + # GET /api/v1/experiments/1/tags + client.experiments.tags.get(1) + + # PUT /api/v1/experiments/1/tags + client.experiments.tags.modify(1, ids=[2, 3]) + + # POST /api/v1/experiments/1/tags + client.experiments.tags.append(1, ids=[2, 3]) + + # DELETE /api/v1/experiments/1/tags/3 + client.experiments.tags.remove(1, tag_id=3) + + # DELETE /api/v1/experiments/1/tags + client.experiments.tags.remove(1) + """ + return self._tags + + def get( + self, + group_id: int | None = None, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get a list of experiments. + + Args: + group_id: The group id the experiments belong to. If None, return + experiments from all groups that the user has access to. Optional, + defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of experiments to return in the paged + response. Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for experiments using the Dioptra API's query language. + Optional, defaults to None. + + Returns: + The response from the Dioptra API. 
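+
+        Example (a minimal sketch; assumes an authenticated Dioptra client called
+        ``client`` and a hypothetical group id)::
+
+            client.experiments.get(group_id=1, page_length=25, sort_by="name")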
+ """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + if group_id is not None: + params["groupId"] = group_id + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, experiment_id: str | int) -> T: + """Get the experiment matching the provided id. + + Args: + experiment_id: The experiment id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(experiment_id)) + + def create( + self, + group_id: int, + name: str, + description: str | None = None, + entrypoints: list[int] | None = None, + ) -> T: + """Creates an experiment. + + Args: + group_id: The id of the group that will own the experiment. + name: The name of the new experiment. + description: The description of the new experiment. Optional, defaults to + None. + entrypoints: A list of entrypoint ids to associate with the new experiment. + Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + json_: dict[str, Any] = { + "group": group_id, + "name": name, + } + + if description is not None: + json_["description"] = description + + if entrypoints is not None: + json_["entrypoints"] = entrypoints + + return self._session.post(self.url, json_=json_) + + def modify_by_id( + self, + experiment_id: str | int, + name: str, + description: str | None, + entrypoints: list[int] | None, + ) -> T: + """Modify the experiment matching the provided id. + + Args: + experiment_id: The experiment id, an integer. + name: The new name of the experiment. + description: The new description of the experiment. To remove the + description, pass None. + entrypoints: A new list of entrypoint ids to associate with the experiment. + To remove all associated entrypoints, pass an empty list or None. + + Returns: + The response from the Dioptra API. + """ + json_: dict[str, Any] = {"name": name} + + if description is not None: + json_["description"] = description + + if entrypoints is not None: + json_["entrypoints"] = entrypoints + + return self._session.put(self.url, str(experiment_id), json_=json_) + + def delete_by_id(self, experiment_id: str | int) -> T: + """Delete the experiment matching the provided id. + + Args: + experiment_id: The experiment id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.url, str(experiment_id)) diff --git a/src/dioptra/client/groups.py b/src/dioptra/client/groups.py new file mode 100644 index 000000000..a1fe5371b --- /dev/null +++ b/src/dioptra/client/groups.py @@ -0,0 +1,81 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, TypeVar + +from .base import CollectionClient, DioptraSession + +T = TypeVar("T") + + +class GroupsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /groups collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "groups" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the GroupsCollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + super().__init__(session) + + def get( + self, + index: int = 0, + page_length: int = 10, + search: str | None = None, + ) -> T: + """Get a list of groups. + + Args: + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of groups to return in the paged response. + Optional, defaults to 10. + search: Search for groups using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if search is not None: + params["search"] = search + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, group_id: str | int) -> T: + """Get the group matching the provided id. + + Args: + group_id: The group id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(group_id)) diff --git a/src/dioptra/client/jobs.py b/src/dioptra/client/jobs.py new file mode 100644 index 000000000..c14750f3e --- /dev/null +++ b/src/dioptra/client/jobs.py @@ -0,0 +1,205 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, Final, TypeVar + +from .base import CollectionClient, DioptraSession +from .snapshots import SnapshotsSubCollectionClient +from .tags import TagsSubCollectionClient + +MLFLOW_RUN: Final[str] = "mlflowRun" +STATUS: Final[str] = "status" + +T = TypeVar("T") + + +class JobsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /jobs collection. 
+ + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "jobs" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the JobsCollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + super().__init__(session) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._tags = TagsSubCollectionClient[T](session=session, root_collection=self) + + @property + def snapshots(self) -> SnapshotsSubCollectionClient[T]: + """The client for retrieving job resource snapshots. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the job snapshots sub-collection. Below are examples of how HTTP + requests to this sub-collection translate into method calls for an active Python + Dioptra Python client called ``client``:: + + # GET /api/v1/jobs/1/snapshots + client.jobs.snapshots.get(1) + + # GET /api/v1/jobs/1/snapshots/2 + client.jobs.snapshots.get_by_id(1, snapshot_id=2) + """ + return self._snapshots + + @property + def tags(self) -> TagsSubCollectionClient[T]: + """ + The client for managing the tags sub-collection owned by the /jobs + collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the tags sub-collection. Below are examples of how HTTP requests to + this sub-collection translate into method calls for an active Python Dioptra + Python client called ``client``:: + + # GET /api/v1/jobs/1/tags + client.jobs.tags.get(1) + + # PUT /api/v1/jobs/1/tags + client.jobs.tags.modify(1, ids=[2, 3]) + + # POST /api/v1/jobs/1/tags + client.jobs.tags.append(1, ids=[2, 3]) + + # DELETE /api/v1/jobs/1/tags/3 + client.jobs.tags.remove(1, tag_id=3) + + # DELETE /api/v1/jobs/1/tags + client.jobs.tags.remove(1) + """ + return self._tags + + def get( + self, + group_id: int | None = None, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get a list of jobs. + + Args: + group_id: The group id the jobs belong to. If None, return + jobs from all groups that the user has access to. Optional, + defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of jobs to return in the paged + response. Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for jobs using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + if group_id is not None: + params["groupId"] = group_id + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, job_id: str | int) -> T: + """Get the job matching the provided id. + + Args: + job_id: The job id, an integer. + + Returns: + The response from the Dioptra API. 
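+
+        Example (a minimal sketch; assumes an authenticated Dioptra client called
+        ``client`` and a hypothetical job id)::
+
+            job = client.jobs.get_by_id(1)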
+ """ + return self._session.get(self.url, str(job_id)) + + def delete_by_id(self, job_id: str | int) -> T: + """Delete the job matching the provided id. + + Args: + job_id: The job id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.url, str(job_id)) + + def get_mlflow_run_id(self, job_id: int) -> T: + """Gets the MLflow run id for a job. + + Args: + job_id: The job id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(job_id), MLFLOW_RUN) + + def set_mlflow_run_id(self, job_id: int, mlflow_run_id: str) -> T: + """Sets the MLflow run id for a job. + + Args: + job_id: The job id, an integer. + mlflow_run_id: The UUID as a string for the associated MLflow run. + + Returns: + The response from the Dioptra API. + """ + json_ = { + "mlflowRunId": mlflow_run_id, + } + + return self._session.post(self.url, str(job_id), MLFLOW_RUN, json_=json_) + + def get_status(self, job_id: int) -> T: + """Gets the status for a job. + + Args: + job_id: The job id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(job_id), STATUS) diff --git a/src/dioptra/client/models.py b/src/dioptra/client/models.py new file mode 100644 index 000000000..05d8dedcb --- /dev/null +++ b/src/dioptra/client/models.py @@ -0,0 +1,434 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, Final, TypeVar + +from .base import ( + CollectionClient, + DioptraClientError, + DioptraSession, + SubCollectionClient, +) +from .drafts import ( + ModifyResourceDraftsSubCollectionClient, + NewResourceDraftsSubCollectionClient, + make_draft_fields_validator, +) +from .snapshots import SnapshotsSubCollectionClient +from .tags import TagsSubCollectionClient + +DRAFT_FIELDS: Final[set[str]] = {"name", "description"} + +T = TypeVar("T") + + +class ModelVersionsSubCollectionClient(SubCollectionClient[T]): + """The client for managing Dioptra's /models/{id}/versions sub-collection. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] = "versions" + + def __init__( + self, + session: DioptraSession[T], + root_collection: CollectionClient[T], + parent_sub_collections: list["SubCollectionClient[T]"] | None = None, + ) -> None: + """Initialize the ModelVersionsSubCollectionClient instance. + + Args: + session: The Dioptra API session object. 
+ root_collection: The client for the root collection that owns this + sub-collection. + parent_sub_collections: Unused in this client, must be None. + """ + if parent_sub_collections is not None: + raise DioptraClientError( + "The parent_sub_collections argument must be None for this client." + ) + + super().__init__( + session=session, + root_collection=root_collection, + parent_sub_collections=parent_sub_collections, + ) + + def get( + self, + model_id: int | str, + index: int = 0, + page_length: int = 10, + search: str | None = None, + ) -> T: + """Get a list of versions for a model. + + Args: + model_id: The model id, an integer. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of versions to return in the paged response. + Optional, defaults to 10. + search: Search for versions using the Dioptra API's query language. + Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if search is not None: + params["search"] = search + + return self._session.get( + self.build_sub_collection_url(model_id), + params=params, + ) + + def get_by_id(self, model_id: str | int, version_number: str | int) -> T: + """Get a model version. + + Args: + model_id: The model id, an integer. + version_number: The version number for the model. + + Returns: + The response from the Dioptra API. + """ + return self._session.get( + self.build_sub_collection_url(model_id), + str(version_number), + ) + + def create( + self, + model_id: str | int, + artifact_id: str | int, + description: str | None = None, + ) -> T: + """Creates a new version of a model. + + Args: + model_id: The model id, an integer. + artifact_id: The id for the artifact that will be used as the new version of + the model. + description: The description of the new version. Optional, defaults to + None. + + Returns: + The response from the Dioptra API. + """ + json_ = {"artifact": artifact_id} + + if description is not None: + json_["description"] = description + + return self._session.post(self.build_sub_collection_url(model_id), json_=json_) + + def modify_by_id( + self, + model_id: str | int, + version_number: str | int, + description: str | None = None, + ) -> T: + """Modify a model version. + + Args: + model_id: The model id, an integer. + version_number: The version number for the model. + description: The updated description for the model version. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + json_: dict[str, Any] = {} + + if description is not None: + json_["description"] = description + + return self._session.put( + self.build_sub_collection_url(model_id), str(version_number), json_=json_ + ) + + +class ModelsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /models collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "models" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the ModelsCollectionClient instance. + + Args: + session: The Dioptra API session object. 
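+
+        Example (illustration only; this client is normally constructed by the
+        top-level Dioptra client and reached as ``client.models``, as in the
+        examples below)::
+
+            models = ModelsCollectionClient(session=session)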
+ """ + super().__init__(session) + self._new_resource_drafts = NewResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._modify_resource_drafts = ModifyResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._tags = TagsSubCollectionClient[T](session=session, root_collection=self) + self._versions = ModelVersionsSubCollectionClient[T]( + session=session, root_collection=self + ) + + @property + def new_resource_drafts(self) -> NewResourceDraftsSubCollectionClient[T]: + """The client for managing the new model drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the new model drafts sub-collection. Below are examples of how + HTTP requests to this sub-collection translate into method calls for an active + Python Dioptra Python client called ``client``:: + + # GET /api/v1/models/drafts + client.models.new_resource_drafts.get() + + # GET /api/v1/models/drafts/1 + client.models.new_resource_drafts.get_by_id(draft_id=1) + + # PUT /api/v1/models/drafts/1 + client.models.new_resource_drafts.modify( + draft_id=1, name="new-name", description="new-description" + ) + + # POST /api/v1/models/drafts + client.models.new_resource_drafts.create( + group_id=1, name="name", description="description" + ) + + # DELETE /api/v1/models/drafts/1 + client.models.new_resource_drafts.delete(draft_id=1) + """ + return self._new_resource_drafts + + @property + def modify_resource_drafts(self) -> ModifyResourceDraftsSubCollectionClient[T]: + """The client for managing the model modification drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the model modification drafts sub-collection. Below are examples of how + HTTP requests to this sub-collection translate into method calls for an active + Python Dioptra Python client called ``client``:: + + # GET /api/v1/models/1/draft + client.models.modify_resource_drafts.get_by_id(1) + + # PUT /api/v1/models/1/draft + client.models.modify_resource_drafts.modify( + 1, name="new-name", description="new-description" + ) + + # POST /api/v1/models/1/draft + client.models.modify_resource_drafts.create( + 1, name="name", description="description" + ) + + # DELETE /api/v1/models/1/draft + client.models.modify_resource_drafts.delete(1) + """ + return self._modify_resource_drafts + + @property + def snapshots(self) -> SnapshotsSubCollectionClient[T]: + """The client for retrieving model resource snapshots. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the model snapshots sub-collection. 
Below are examples of how HTTP
+        requests to this sub-collection translate into method calls for an active Python
+        Dioptra Python client called ``client``::
+
+            # GET /api/v1/models/1/snapshots
+            client.models.snapshots.get(1)
+
+            # GET /api/v1/models/1/snapshots/2
+            client.models.snapshots.get_by_id(1, snapshot_id=2)
+        """
+        return self._snapshots
+
+    @property
+    def tags(self) -> TagsSubCollectionClient[T]:
+        """
+        The client for managing the tags sub-collection owned by the /models collection.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource ids
+        that own the tags sub-collection. Below are examples of how HTTP requests to
+        this sub-collection translate into method calls for an active Python Dioptra
+        Python client called ``client``::
+
+            # GET /api/v1/models/1/tags
+            client.models.tags.get(1)
+
+            # PUT /api/v1/models/1/tags
+            client.models.tags.modify(1, ids=[2, 3])
+
+            # POST /api/v1/models/1/tags
+            client.models.tags.append(1, ids=[2, 3])
+
+            # DELETE /api/v1/models/1/tags/3
+            client.models.tags.remove(1, tag_id=3)
+
+            # DELETE /api/v1/models/1/tags
+            client.models.tags.remove(1)
+        """
+        return self._tags
+
+    @property
+    def versions(self) -> ModelVersionsSubCollectionClient[T]:
+        """The client for managing the versions sub-collection."""
+        return self._versions
+
+    def get(
+        self,
+        group_id: int | None = None,
+        index: int = 0,
+        page_length: int = 10,
+        sort_by: str | None = None,
+        descending: bool | None = None,
+        search: str | None = None,
+    ) -> T:
+        """Get a list of models.
+
+        Args:
+            group_id: The group id the models belong to. If None, return models
+                from all groups that the user has access to. Optional, defaults to None.
+            index: The paging index. Optional, defaults to 0.
+            page_length: The maximum number of models to return in the paged
+                response. Optional, defaults to 10.
+            sort_by: The field to use to sort the returned list. Optional, defaults to
+                None.
+            descending: Sort the returned list in descending order. Optional, defaults
+                to None.
+            search: Search for models using the Dioptra API's query language. Optional,
+                defaults to None.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        params: dict[str, Any] = {
+            "index": index,
+            "pageLength": page_length,
+        }
+
+        if sort_by is not None:
+            params["sortBy"] = sort_by
+
+        if descending is not None:
+            params["descending"] = descending
+
+        if search is not None:
+            params["search"] = search
+
+        if group_id is not None:
+            params["groupId"] = group_id
+
+        return self._session.get(
+            self.url,
+            params=params,
+        )
+
+    def get_by_id(self, model_id: str | int) -> T:
+        """Get the model matching the provided id.
+
+        Args:
+            model_id: The model id, an integer.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(self.url, str(model_id))
+
+    def create(self, group_id: int, name: str, description: str | None = None) -> T:
+        """Creates a model.
+
+        Args:
+            group_id: The id of the group that will own the model.
+            name: The name of the new model.
+            description: The description of the new model. Optional, defaults to
+                None.
+
+        Returns:
+            The response from the Dioptra API.
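+
+        Example (a minimal sketch; assumes an authenticated Dioptra client called
+        ``client``, a hypothetical group id, and a hypothetical model name)::
+
+            response = client.models.create(
+                group_id=1, name="mnist-classifier", description="Example model"
+            )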
+ """ + json_ = { + "group": group_id, + "name": name, + } + + if description is not None: + json_["description"] = description + + return self._session.post(self.url, json_=json_) + + def modify_by_id( + self, model_id: str | int, name: str, description: str | None + ) -> T: + """Modify the model matching the provided id. + + Args: + model_id: The model id, an integer. + name: The new name of the queue. + description: The new description of the model. To remove the description, + pass None. + + Returns: + The response from the Dioptra API. + """ + json_ = {"name": name} + + if description is not None: + json_["description"] = description + + return self._session.put(self.url, str(model_id), json_=json_) + + def delete_by_id(self, model_id: str | int) -> T: + """Delete the model matching the provided id. + + Args: + model_id: The model id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.url, str(model_id)) diff --git a/src/dioptra/client/plugin_parameter_types.py b/src/dioptra/client/plugin_parameter_types.py new file mode 100644 index 000000000..2ed83abfa --- /dev/null +++ b/src/dioptra/client/plugin_parameter_types.py @@ -0,0 +1,316 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, Final, TypeVar + +from .base import CollectionClient, DioptraSession +from .drafts import ( + ModifyResourceDraftsSubCollectionClient, + NewResourceDraftsSubCollectionClient, + make_draft_fields_validator, +) +from .snapshots import SnapshotsSubCollectionClient +from .tags import TagsSubCollectionClient + +DRAFT_FIELDS: Final[set[str]] = {"name", "description", "structure"} + +T = TypeVar("T") + + +class PluginParameterTypesCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /pluginParameterTypes collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "pluginParameterTypes" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the PluginParameterTypesCollectionClient instance. + + Args: + session: The Dioptra API session object. 
+ """ + super().__init__(session) + self._new_resource_drafts = NewResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._modify_resource_drafts = ModifyResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._tags = TagsSubCollectionClient[T](session=session, root_collection=self) + + @property + def new_resource_drafts(self) -> NewResourceDraftsSubCollectionClient[T]: + """The client for managing the new plugin parameter type drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the new plugin parameter type drafts sub-collection. Below are examples + of how HTTP requests to this sub-collection translate into method calls for an + active Python Dioptra Python client called ``client``:: + + # GET /api/v1/pluginParameterTypes/drafts + client.plugin_parameter_types.new_resource_drafts.get() + + # GET /api/v1/pluginParameterTypes/drafts/1 + client.plugin_parameter_types.new_resource_drafts.get_by_id(draft_id=1) + + # PUT /api/v1/pluginParameterTypes/drafts/1 + client.plugin_parameter_types.new_resource_drafts.modify( + draft_id=1, + name="new-name", + description="new-description", + structure=None, + ) + + # POST /api/v1/pluginParameterTypes/drafts + client.plugin_parameter_types.new_resource_drafts.create( + group_id=1, name="name", description="description", structure=None + ) + + # DELETE /api/v1/pluginParameterTypes/drafts/1 + client.plugin_parameter_types.new_resource_drafts.delete(draft_id=1) + """ + return self._new_resource_drafts + + @property + def modify_resource_drafts(self) -> ModifyResourceDraftsSubCollectionClient[T]: + """ + The client for managing the plugin parameter type modification drafts + sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the plugin parameter type modification drafts sub-collection. Below are + examples of how HTTP requests to this sub-collection translate into method calls + for an active Python Dioptra Python client called ``client``:: + + # GET /api/v1/pluginParameterTypes/1/draft + client.plugin_parameter_types.modify_resource_drafts.get_by_id(1) + + # PUT /api/v1/pluginParameterTypes/1/draft + client.plugin_parameter_types.modify_resource_drafts.modify( + 1, name="new-name", description="new-description", structure=None + ) + + # POST /api/v1/pluginParameterTypes/1/draft + client.plugin_parameter_types.modify_resource_drafts.create( + 1, name="name", description="description", structure=None + ) + + # DELETE /api/v1/pluginParameterTypes/1/draft + client.plugin_parameter_types.modify_resource_drafts.delete(1) + """ + return self._modify_resource_drafts + + @property + def snapshots(self) -> SnapshotsSubCollectionClient[T]: + """The client for retrieving plugin parameter type resource snapshots. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. 
These are the parent resource ids + that own the plugin parameter type snapshots sub-collection. Below are examples + of how HTTP requests to this sub-collection translate into method calls for an + active Python Dioptra Python client called ``client``:: + + # GET /api/v1/pluginParameterTypes/1/snapshots + client.plugin_parameter_types.snapshots.get(1) + + # GET /api/v1/pluginParameterTypes/1/snapshots/2 + client.plugin_parameter_types.snapshots.get_by_id(1, snapshot_id=2) + """ + return self._snapshots + + @property + def tags(self) -> TagsSubCollectionClient[T]: + """ + The client for managing the tags sub-collection owned by the + /pluginParameterTypes collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the tags sub-collection. Below are examples of how HTTP requests to + this sub-collection translate into method calls for an active Python Dioptra + Python client called ``client``:: + + # GET /api/v1/pluginParameterTypes/1/tags + client.plugin_parameter_types.tags.get(1) + + # PUT /api/v1/pluginParameterTypes/1/tags + client.plugin_parameter_types.tags.modify(1, ids=[2, 3]) + + # POST /api/v1/pluginParameterTypes/1/tags + client.plugin_parameter_types.tags.append(1, ids=[2, 3]) + + # DELETE /api/v1/pluginParameterTypes/1/tags/3 + client.plugin_parameter_types.tags.remove(1, tag_id=3) + + # DELETE /api/v1/pluginParameterTypes/1/tags + client.plugin_parameter_types.tags.remove(1) + """ + return self._tags + + def get( + self, + group_id: int | None = None, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get a list of plugin parameter types. + + Args: + group_id: The group id the plugin parameter types belong to. If None, return + plugin parameter types from all groups that the user has access to. + Optional, defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of plugin parameter types to return in the + paged response. Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for plugin parameter types using the Dioptra API's query + language. Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + if group_id is not None: + params["groupId"] = group_id + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, plugin_parameter_type_id: str | int) -> T: + """Get the plugin parameter type matching the provided id. + + Args: + plugin_parameter_type_id: The plugin parameter type id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(plugin_parameter_type_id)) + + def create( + self, + group_id: int, + name: str, + description: str | None = None, + structure: dict[str, Any] | None = None, + ) -> T: + """Creates a plugin parameter type. + + Args: + group_id: The id of the group that will own the plugin parameter type. + name: The name of the new plugin parameter type. 
+ description: The description of the new plugin parameter type. Optional, + defaults to None. + structure: Used to declare the internal structure of a plugin parameter + type. If None, then the plugin parameter type is a simple type. + Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + json_: dict[str, Any] = { + "group": group_id, + "name": name, + } + + if description is not None: + json_["description"] = description + + if structure is not None: + json_["structure"] = structure + + return self._session.post(self.url, json_=json_) + + def modify_by_id( + self, + plugin_parameter_type_id: str | int, + name: str, + description: str | None, + structure: dict[str, Any] | None, + ) -> T: + """Modify the plugin parameter type matching the provided id. + + Args: + plugin_parameter_type_id: The plugin parameter type id, an integer. + name: The new name of the plugin parameter type. + description: The new description of the plugin parameter type. To remove the + description, pass None. + structure: The internal structure of a plugin type. If None, then the + plugin parameter type is a simple type. To convert a structured type to + a simple type, pass None. Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + json_: dict[str, Any] = {"name": name} + + if description is not None: + json_["description"] = description + + if structure is not None: + json_["structure"] = structure + + return self._session.put(self.url, str(plugin_parameter_type_id), json_=json_) + + def delete_by_id(self, plugin_parameter_type_id: str | int) -> T: + """Delete the plugin parameter type matching the provided id. + + Args: + plugin_parameter_type_id: The plugin parameter type id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.url, str(plugin_parameter_type_id)) diff --git a/src/dioptra/client/plugins.py b/src/dioptra/client/plugins.py new file mode 100644 index 000000000..2960e7f99 --- /dev/null +++ b/src/dioptra/client/plugins.py @@ -0,0 +1,637 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. 
+#
+# ACCESS THE FULL CC BY 4.0 LICENSE HERE:
+# https://creativecommons.org/licenses/by/4.0/legalcode
+from typing import Any, ClassVar, Final, TypeVar
+
+from .base import (
+    CollectionClient,
+    DioptraClientError,
+    DioptraSession,
+    SubCollectionClient,
+)
+from .drafts import (
+    ModifyResourceDraftsSubCollectionClient,
+    NewResourceDraftsSubCollectionClient,
+    make_draft_fields_validator,
+)
+from .snapshots import SnapshotsSubCollectionClient
+from .tags import TagsSubCollectionClient
+
+PLUGINS_DRAFT_FIELDS: Final[set[str]] = {"name", "description"}
+PLUGIN_FILES_DRAFT_FIELDS: Final[set[str]] = {
+    "filename",
+    "contents",
+    "tasks",
+    "description",
+}
+
+T = TypeVar("T")
+
+
+class PluginFilesSubCollectionClient(SubCollectionClient[T]):
+    """The client for managing Dioptra's /plugins/{id}/files sub-collection.
+
+    Attributes:
+        name: The name of the sub-collection.
+    """
+
+    name: ClassVar[str] = "files"
+
+    def __init__(
+        self,
+        session: DioptraSession[T],
+        root_collection: CollectionClient[T],
+        parent_sub_collections: list["SubCollectionClient[T]"] | None = None,
+    ) -> None:
+        """Initialize the PluginFilesSubCollectionClient instance.
+
+        Args:
+            session: The Dioptra API session object.
+            root_collection: The client for the root collection that owns this
+                sub-collection.
+            parent_sub_collections: Unused in this client, must be None.
+        """
+        if parent_sub_collections is not None:
+            raise DioptraClientError(
+                "The parent_sub_collections argument must be None for this client."
+            )
+
+        super().__init__(
+            session=session,
+            root_collection=root_collection,
+            parent_sub_collections=parent_sub_collections,
+        )
+        self._new_resource_drafts = NewResourceDraftsSubCollectionClient[T](
+            session=session,
+            validate_fields_fn=make_draft_fields_validator(
+                draft_fields=PLUGIN_FILES_DRAFT_FIELDS,
+                resource_name=f"plugin {self.name}",
+            ),
+            root_collection=root_collection,
+            parent_sub_collections=[self],
+        )
+        self._modify_resource_drafts = ModifyResourceDraftsSubCollectionClient[T](
+            session=session,
+            validate_fields_fn=make_draft_fields_validator(
+                draft_fields=PLUGIN_FILES_DRAFT_FIELDS,
+                resource_name=f"plugin {self.name}",
+            ),
+            root_collection=root_collection,
+            parent_sub_collections=[self],
+        )
+        self._snapshots = SnapshotsSubCollectionClient[T](
+            session=session,
+            root_collection=root_collection,
+            parent_sub_collections=[self],
+        )
+        self._tags = TagsSubCollectionClient[T](
+            session=session,
+            root_collection=root_collection,
+            parent_sub_collections=[self],
+        )
+
+    @property
+    def new_resource_drafts(self) -> NewResourceDraftsSubCollectionClient[T]:
+        """The client for managing the new plugin file drafts sub-collection.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource ids
+        that own the new plugin file drafts sub-collection.
Below are examples of how + HTTP requests to this sub-collection translate into method calls for an active + Python Dioptra Python client called ``client``:: + + # GET /api/v1/plugins/1/files/drafts + client.plugins.files.new_resource_drafts.get(1) + + # GET /api/v1/plugins/1/files/drafts/1 + client.plugins.files.new_resource_drafts.get_by_id(1, draft_id=1) + + # PUT /api/v1/plugins/1/files/drafts/1 + client.plugins.files.new_resource_drafts.modify( + 1, + draft_id=1, + filename="new_name.py", + contents="", + tasks=[], + description="new-description" + ) + + # POST /api/v1/plugins/1/files/drafts + client.plugins.files.new_resource_drafts.create( + 1, + filename="name.py", + contents="", + tasks=[], + description="description" + ) + + # DELETE /api/v1/plugins/1/files/drafts/1 + client.plugins.files.new_resource_drafts.delete(1, draft_id=1) + """ + return self._new_resource_drafts + + @property + def modify_resource_drafts(self) -> ModifyResourceDraftsSubCollectionClient[T]: + """The client for managing the plugin file modification drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the plugin file modification drafts sub-collection. Below are examples + of how HTTP requests to this sub-collection translate into method calls for an + active Python Dioptra Python client called ``client``:: + + # GET /api/v1/plugins/1/files/2/draft + client.plugins.files.modify_resource_drafts.get_by_id(1, 2) + + # PUT /api/v1/plugins/1/files/2/draft + client.plugins.files.modify_resource_drafts.modify( + 1, + 2, + filename="new_name.py", + contents="", + tasks=[], + description="new-description" + ) + + # POST /api/v1/plugins/1/files/2/draft + client.plugins.files.modify_resource_drafts.create( + 1, + 2, + filename="name.py", + contents="", + tasks=[], + description="description" + ) + + # DELETE /api/v1/plugins/1/files/2/draft + client.plugins.files.modify_resource_drafts.delete(1, 2) + """ + return self._modify_resource_drafts + + @property + def snapshots(self) -> SnapshotsSubCollectionClient[T]: + """The client for retrieving plugin file resource snapshots. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the plugin file snapshots sub-collection. Below are examples of how + HTTP requests to this sub-collection translate into method calls for an active + Python Dioptra Python client called ``client``:: + + # GET /api/v1/plugins/1/files/2/snapshots + client.plugins.files.snapshots.get(1, 2) + + # GET /api/v1/plugins/1/files/2/snapshots/3 + client.plugins.files.snapshots.get_by_id(1, 2, snapshot_id=3) + """ + return self._snapshots + + @property + def tags(self) -> TagsSubCollectionClient[T]: + """The client for managing the plugin file tags sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the tags sub-collection. 
Below are examples of how HTTP requests to + this sub-collection translate into method calls for an active Python Dioptra + Python client called ``client``:: + + # GET /api/v1/plugins/1/files/2/tags + client.plugins.files.tags.get(1, 2) + + # PUT /api/v1/plugins/1/files/2/tags + client.plugins.files.tags.modify(1, 2, ids=[3, 4]) + + # POST /api/v1/plugins/1/files/2/tags + client.plugins.files.tags.append(1, 2, ids=[3, 4]) + + # DELETE /api/v1/plugins/1/files/2/tags/3 + client.plugins.files.tags.remove(1, 2, tag_id=3) + + # DELETE /api/v1/plugins/1/files/2/tags + client.plugins.files.tags.remove(1, 2) + """ + return self._tags + + def get( + self, + plugin_id: int | str, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get a list of plugin files for a specific plugin. + + Args: + plugin_id: The id for the plugin that owns the plugin files. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of plugins to return in the paged response. + Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for plugins using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + return self._session.get( + self.build_sub_collection_url(plugin_id), + params=params, + ) + + def get_by_id(self, plugin_id: str | int, plugin_file_id: str | int) -> T: + """Get the plugin file matching the provided ids. + + Args: + plugin_id: The id for the plugin that owns the plugin file. + plugin_file_id: The plugin file id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get( + self.build_sub_collection_url(plugin_id), + str(plugin_file_id), + ) + + def create( + self, + plugin_id: str | int, + filename: str, + contents: str, + tasks: list[dict[str, Any]], + description: str | None = None, + ) -> T: + """Creates a plugin file. + + Args: + plugin_id: The id for the plugin that will own the new plugin file. + filename: The filename for the new plugin file. + contents: The contents of the new Python file. + tasks: The information needed to register the plugin tasks contained in the + plugin file, a list. + description: The description of the new plugin file. Optional, defaults to + None. + + Returns: + The response from the Dioptra API. + """ + json_ = {"filename": filename, "contents": contents, "tasks": tasks} + + if description is not None: + json_["description"] = description + + return self._session.post(self.build_sub_collection_url(plugin_id), json_=json_) + + def modify_by_id( + self, + plugin_id: str | int, + plugin_file_id: str | int, + filename: str, + contents: str, + tasks: list[dict[str, Any]], + description: str | None = None, + ) -> T: + """Modify a plugin file matching the provided ids. + + Args: + plugin_id: The id for the plugin that owns the plugin file. + plugin_file_id: The plugin file id, an integer. + filename: The filename for the new plugin file. + contents: The contents of the new Python file. 
+ tasks: The information needed to register the plugin tasks contained in the + plugin file, a list. + description: The description of the new plugin file. Optional, defaults to + None. + + Returns: + The response from the Dioptra API. + """ + json_ = {"filename": filename, "contents": contents, "tasks": tasks} + + if description is not None: + json_["description"] = description + + return self._session.put( + self.build_sub_collection_url(plugin_id), str(plugin_file_id), json_=json_ + ) + + def delete_by_id(self, plugin_id: str | int, plugin_file_id: str | int) -> T: + """Delete a plugin file. + + Args: + plugin_id: The id for the plugin that owns the plugin file to be deleted. + plugin_file_id: The plugin file id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete( + self.build_sub_collection_url(plugin_id), str(plugin_file_id) + ) + + def delete_all(self, plugin_id: str | int) -> T: + """Delete all plugin files owned by the plugin matching the provided id. + + Args: + plugin_id: The id for the plugin that owns the plugin files to be deleted. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.build_sub_collection_url(plugin_id)) + + +class PluginsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /plugins collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "plugins" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the PluginsCollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + super().__init__(session) + self._files = PluginFilesSubCollectionClient[T]( + session=session, root_collection=self + ) + self._new_resource_drafts = NewResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=PLUGINS_DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._modify_resource_drafts = ModifyResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=PLUGINS_DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._tags = TagsSubCollectionClient[T](session=session, root_collection=self) + + @property + def files(self) -> PluginFilesSubCollectionClient[T]: + """The client for managing the plugin files sub-collection.""" + return self._files + + @property + def new_resource_drafts(self) -> NewResourceDraftsSubCollectionClient[T]: + """The client for managing the new plugin drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the new plugin drafts sub-collection. 
Below are examples of how + HTTP requests to this sub-collection translate into method calls for an active + Python Dioptra Python client called ``client``:: + + # GET /api/v1/plugins/drafts + client.plugins.new_resource_drafts.get() + + # GET /api/v1/plugins/drafts/1 + client.plugins.new_resource_drafts.get_by_id(draft_id=1) + + # PUT /api/v1/plugins/drafts/1 + client.plugins.new_resource_drafts.modify( + draft_id=1, name="new-name", description="new-description" + ) + + # POST /api/v1/plugins/drafts + client.plugins.new_resource_drafts.create( + group_id=1, name="name", description="description" + ) + + # DELETE /api/v1/plugins/drafts/1 + client.plugins.new_resource_drafts.delete(draft_id=1) + """ + return self._new_resource_drafts + + @property + def modify_resource_drafts(self) -> ModifyResourceDraftsSubCollectionClient[T]: + """The client for managing the plugin modification drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the plugin modification drafts sub-collection. Below are examples of + how HTTP requests to this sub-collection translate into method calls for an + active Python Dioptra Python client called ``client``:: + + # GET /api/v1/plugins/1/draft + client.plugins.modify_resource_drafts.get_by_id(1) + + # PUT /api/v1/plugins/1/draft + client.plugins.modify_resource_drafts.modify( + 1, name="new-name", description="new-description" + ) + + # POST /api/v1/plugins/1/draft + client.plugins.modify_resource_drafts.create( + 1, name="name", description="description" + ) + + # DELETE /api/v1/plugins/1/draft + client.plugins.modify_resource_drafts.delete(1) + """ + return self._modify_resource_drafts + + @property + def snapshots(self) -> SnapshotsSubCollectionClient[T]: + """The client for retrieving plugin resource snapshots. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the plugin snapshots sub-collection. Below are examples of how HTTP + requests to this sub-collection translate into method calls for an active Python + Dioptra Python client called ``client``:: + + # GET /api/v1/plugins/1/snapshots + client.plugins.snapshots.get(1) + + # GET /api/v1/plugins/1/snapshots/2 + client.plugins.snapshots.get_by_id(1, snapshot_id=2) + """ + return self._snapshots + + @property + def tags(self) -> TagsSubCollectionClient[T]: + """The client for managing the plugin tags sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the tags sub-collection. 
Below are examples of how HTTP requests to + this sub-collection translate into method calls for an active Python Dioptra + Python client called ``client``:: + + # GET /api/v1/plugins/1/tags + client.plugins.tags.get(1) + + # PUT /api/v1/plugins/1/tags + client.plugins.tags.modify(1, ids=[2, 3]) + + # POST /api/v1/plugins/1/tags + client.plugins.tags.append(1, ids=[2, 3]) + + # DELETE /api/v1/plugins/1/tags/3 + client.plugins.tags.remove(1, tag_id=3) + + # DELETE /api/v1/plugins/1/tags + client.plugins.tags.remove(1) + """ + return self._tags + + def get( + self, + group_id: int | None = None, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get a list of plugins. + + Args: + group_id: The group id the plugins belong to. If None, return plugins from + all groups that the user has access to. Optional, defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of plugins to return in the paged response. + Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for plugins using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + if group_id is not None: + params["groupId"] = group_id + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, plugin_id: str | int) -> T: + """Get the plugin matching the provided id. + + Args: + plugin_id: The plugin id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(plugin_id)) + + def create(self, group_id: int, name: str, description: str | None = None) -> T: + """Creates a plugin. + + Args: + group_id: The id of the group that will own the plugin. + name: The name of the new plugin. + description: The description of the new plugin. Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + json_ = { + "group": group_id, + "name": name, + } + + if description is not None: + json_["description"] = description + + return self._session.post(self.url, json_=json_) + + def modify_by_id( + self, plugin_id: str | int, name: str, description: str | None + ) -> T: + """Modify the plugin matching the provided id. + + Args: + plugin_id: The plugin id, an integer. + name: The new name of the plugin. + description: The new description of the plugin. To remove the description, + pass None. + + Returns: + The response from the Dioptra API. + """ + json_ = {"name": name} + + if description is not None: + json_["description"] = description + + return self._session.put(self.url, str(plugin_id), json_=json_) + + def delete_by_id(self, plugin_id: str | int) -> T: + """Delete the plugin matching the provided id. + + Args: + plugin_id: The plugin id, an integer. + + Returns: + The response from the Dioptra API. 
+ """ + return self._session.delete(self.url, str(plugin_id)) diff --git a/src/dioptra/client/queues.py b/src/dioptra/client/queues.py new file mode 100644 index 000000000..d7ece3af1 --- /dev/null +++ b/src/dioptra/client/queues.py @@ -0,0 +1,286 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, Final, TypeVar + +from .base import CollectionClient, DioptraSession +from .drafts import ( + ModifyResourceDraftsSubCollectionClient, + NewResourceDraftsSubCollectionClient, + make_draft_fields_validator, +) +from .snapshots import SnapshotsSubCollectionClient +from .tags import TagsSubCollectionClient + +DRAFT_FIELDS: Final[set[str]] = {"name", "description"} + +T = TypeVar("T") + + +class QueuesCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /queues collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "queues" + + def __init__(self, session: DioptraSession[T]) -> None: + """Initialize the QueuesCollectionClient instance. + + Args: + session: The Dioptra API session object. + """ + super().__init__(session) + self._new_resource_drafts = NewResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._modify_resource_drafts = ModifyResourceDraftsSubCollectionClient[T]( + session=session, + validate_fields_fn=make_draft_fields_validator( + draft_fields=DRAFT_FIELDS, + resource_name=self.name, + ), + root_collection=self, + ) + self._snapshots = SnapshotsSubCollectionClient[T]( + session=session, root_collection=self + ) + self._tags = TagsSubCollectionClient[T](session=session, root_collection=self) + + @property + def new_resource_drafts(self) -> NewResourceDraftsSubCollectionClient[T]: + """The client for managing the new queue drafts sub-collection. + + Each client method in the sub-collection accepts an arbitrary number of + positional arguments called ``*resource_ids``. These are the parent resource ids + that own the new queue drafts sub-collection. 
Below are examples of how
+        HTTP requests to this sub-collection translate into method calls for an
+        active Dioptra Python client called ``client``::
+
+            # GET /api/v1/queues/drafts
+            client.queues.new_resource_drafts.get()
+
+            # GET /api/v1/queues/drafts/1
+            client.queues.new_resource_drafts.get_by_id(draft_id=1)
+
+            # PUT /api/v1/queues/drafts/1
+            client.queues.new_resource_drafts.modify(
+                draft_id=1, name="new-name", description="new-description"
+            )
+
+            # POST /api/v1/queues/drafts
+            client.queues.new_resource_drafts.create(
+                group_id=1, name="name", description="description"
+            )
+
+            # DELETE /api/v1/queues/drafts/1
+            client.queues.new_resource_drafts.delete(draft_id=1)
+        """
+        return self._new_resource_drafts
+
+    @property
+    def modify_resource_drafts(self) -> ModifyResourceDraftsSubCollectionClient[T]:
+        """The client for managing the queue modification drafts sub-collection.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource
+        ids that own the queue modification drafts sub-collection. Below are examples
+        of how HTTP requests to this sub-collection translate into method calls for an
+        active Dioptra Python client called ``client``::
+
+            # GET /api/v1/queues/1/draft
+            client.queues.modify_resource_drafts.get_by_id(1)
+
+            # PUT /api/v1/queues/1/draft
+            client.queues.modify_resource_drafts.modify(
+                1, name="new-name", description="new-description"
+            )
+
+            # POST /api/v1/queues/1/draft
+            client.queues.modify_resource_drafts.create(
+                1, name="name", description="description"
+            )
+
+            # DELETE /api/v1/queues/1/draft
+            client.queues.modify_resource_drafts.delete(1)
+        """
+        return self._modify_resource_drafts
+
+    @property
+    def snapshots(self) -> SnapshotsSubCollectionClient[T]:
+        """The client for retrieving queue resource snapshots.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource
+        ids that own the queue snapshots sub-collection. Below are examples of how
+        HTTP requests to this sub-collection translate into method calls for an
+        active Dioptra Python client called ``client``::
+
+            # GET /api/v1/queues/1/snapshots
+            client.queues.snapshots.get(1)
+
+            # GET /api/v1/queues/1/snapshots/2
+            client.queues.snapshots.get_by_id(1, snapshot_id=2)
+        """
+        return self._snapshots
+
+    @property
+    def tags(self) -> TagsSubCollectionClient[T]:
+        """
+        The client for managing the tags sub-collection owned by the /queues
+        collection.
+
+        Each client method in the sub-collection accepts an arbitrary number of
+        positional arguments called ``*resource_ids``. These are the parent resource
+        ids that own the tags sub-collection. Below are examples of how HTTP requests
+        to this sub-collection translate into method calls for an active Dioptra
+        Python client called ``client``::
+
+            # GET /api/v1/queues/1/tags
+            client.queues.tags.get(1)
+
+            # PUT /api/v1/queues/1/tags
+            client.queues.tags.modify(1, ids=[2, 3])
+
+            # POST /api/v1/queues/1/tags
+            client.queues.tags.append(1, ids=[2, 3])
+
+            # DELETE /api/v1/queues/1/tags/3
+            client.queues.tags.remove(1, tag_id=3)
+
+            # DELETE /api/v1/queues/1/tags
+            client.queues.tags.remove_all(1)
+        """
+        return self._tags
+
+    def get(
+        self,
+        group_id: int | None = None,
+        index: int = 0,
+        page_length: int = 10,
+        sort_by: str | None = None,
+        descending: bool | None = None,
+        search: str | None = None,
+    ) -> T:
+        """Get a list of queues.
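+
+        For example, the first page of up to 25 queues sorted by name (the
+        values are illustrative)::
+
+            client.queues.get(page_length=25, sort_by="name")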
+ + Args: + group_id: The group id the queues belong to. If None, return queues from all + groups that the user has access to. Optional, defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of queues to return in the paged response. + Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for queues using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + if group_id is not None: + params["groupId"] = group_id + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, queue_id: str | int) -> T: + """Get the queue matching the provided id. + + Args: + queue_id: The queue id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(queue_id)) + + def create(self, group_id: int, name: str, description: str | None = None) -> T: + """Creates a queue. + + Args: + group_id: The id of the group that will own the queue. + name: The name of the new queue. + description: The description of the new queue. Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + json_ = { + "group": group_id, + "name": name, + } + + if description is not None: + json_["description"] = description + + return self._session.post(self.url, json_=json_) + + def modify_by_id( + self, queue_id: str | int, name: str, description: str | None + ) -> T: + """Modify the queue matching the provided id. + + Args: + queue_id: The queue id, an integer. + name: The new name of the queue. + description: The new description of the queue. To remove the description, + pass None. + + Returns: + The response from the Dioptra API. + """ + json_ = {"name": name} + + if description is not None: + json_["description"] = description + + return self._session.put(self.url, str(queue_id), json_=json_) + + def delete_by_id(self, queue_id: str | int) -> T: + """Delete the queue matching the provided id. + + Args: + queue_id: The queue id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.url, str(queue_id)) diff --git a/src/dioptra/client/sessions.py b/src/dioptra/client/sessions.py new file mode 100644 index 000000000..8a6ef9202 --- /dev/null +++ b/src/dioptra/client/sessions.py @@ -0,0 +1,718 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +import logging +from abc import ABC, abstractmethod +from http import HTTPStatus +from pathlib import Path +from typing import Any, Callable, ClassVar, Final, Generic, TypeVar, cast +from urllib.parse import urlparse, urlunparse + +import requests + +from .base import ( + APIConnectionError, + DioptraClientError, + DioptraResponseProtocol, + DioptraSession, + JSONDecodeError, + StatusCodeError, +) + +LOGGER = logging.getLogger(__name__) + +DIOPTRA_API_VERSION: Final[str] = "v1" + +T = TypeVar("T") + + +def wrap_request_method( + func: Any, +) -> Callable[..., DioptraResponseProtocol]: + """Wrap a requests method to log the request and response data. + + Args: + func: The requests method to wrap. + + Returns: + A wrapped version of the requests method that logs the request and response + data. + """ + + def wrapper(url: str, *args, **kwargs) -> DioptraResponseProtocol: + """Wrap the requests method to log the request and response data. + + The returned response object will follow the DioptraResponseProtocol interface. + + Args: + url: The URL of the API endpoint. + *args: Additional arguments to pass to the requests method. + **kwargs: Additional keyword arguments to pass to the requests method. + + Returns: + The response from the requests method. + + Raises: + APIConnectionError: If the connection to the REST API fails. + """ + LOGGER.debug( + "Request made: url=%s method=%s", + url, + str(func.__name__).upper(), + ) + + try: + response = cast(DioptraResponseProtocol, func(url, *args, **kwargs)) + + except requests.ConnectionError as err: + raise APIConnectionError(f"Connection failed: {url}") from err + + LOGGER.debug("Response received: status_code=%s", str(response.status_code)) + return response + + return wrapper + + +def convert_response_to_dict(response: DioptraResponseProtocol) -> dict[str, Any]: + """Convert a response object to a JSON-like Python dictionary. + + Args: + response: A response object that follows the DioptraResponseProtocol interface. + + Returns: + A Python dictionary containing the response data. + + Raises: + StatusCodeError: If the response status code is not in the 2xx range. + JSONDecodeError: If the response data cannot be parsed as JSON. + """ + if not is_2xx(response.status_code): + LOGGER.debug( + "HTTP error code returned: status_code=%s method=%s url=%s text=%s", + response.status_code, + response.request.method, + response.request.url, + response.text, + ) + raise StatusCodeError(f"Error code returned: {response.status_code}") + + try: + response_dict = response.json() + + except requests.JSONDecodeError as err: + LOGGER.debug( + "Failed to parse HTTP response data as JSON: method=%s url=%s text=%s", + response.request.method, + response.request.url, + response.text, + ) + raise JSONDecodeError("Failed to parse HTTP response data as JSON") from err + + return response_dict + + +def is_2xx(status_code: int) -> bool: + """Check if the status code is in the 2xx range. + + Args: + status_code: The HTTP status code to check. + + Returns: + True if the status code is in the 2xx range, False otherwise. 
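+
+    For example, codes at or above 200 and below 300 are accepted::
+
+        is_2xx(200)  # True
+        is_2xx(204)  # True
+        is_2xx(404)  # False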
+ """ + return status_code >= HTTPStatus.OK and status_code < HTTPStatus.MULTIPLE_CHOICES + + +class BaseDioptraRequestsSession(DioptraSession[T], ABC, Generic[T]): + """ + The interface for communicating with the Dioptra API using the requests library. + + Attributes: + DOWNLOAD_CHUNK_SIZE: The number of bytes to read into memory per chunk when + downloading a file from the API. + """ + + DOWNLOAD_CHUNK_SIZE: ClassVar[int] = 10 * 1024 + + def __init__(self, address: str) -> None: + """Initialize the Dioptra API session object. + + Args: + address: The base URL of the API endpoints. + """ + self._scheme, self._netloc, self._path, _, _, _ = urlparse(address) + self._session: requests.Session | None = None + + @property + def url(self) -> str: + """The base URL of the API endpoints.""" + return urlunparse((self._scheme, self._netloc, self._path, "", "", "")) + + def connect(self) -> None: + """Connect to the API using a requests Session.""" + if self._session is None: + self._session = requests.Session() + + def close(self) -> None: + """Close the connection to the API by closing the requests Session.""" + if self._session is None: + return None + + self._session.close() + self._session = None + + def make_request( + self, + method_name: str, + url: str, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a request to the API. + + Args: + method_name: The HTTP method to use. Must be one of "get", "patch", "post", + "put", or "delete". + url: The URL of the API endpoint. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + """ + session = self._get_requests_session() + methods_registry: dict[str, Callable[..., DioptraResponseProtocol]] = { + "get": wrap_request_method(session.get), + "patch": wrap_request_method(session.patch), + "post": wrap_request_method(session.post), + "put": wrap_request_method(session.put), + "delete": wrap_request_method(session.delete), + } + + if method_name not in methods_registry: + raise DioptraClientError( + f"Unsupported method requested (reason: must be one of " + f"{sorted(methods_registry.keys())}): {method_name}." + ) + + method = methods_registry[method_name] + method_kwargs: dict[str, Any] = {} + + if json_: + method_kwargs["json"] = json_ + + if params: + method_kwargs["params"] = params + + return method(url, **method_kwargs) + + def download( + self, + endpoint: str, + *parts, + output_path: Path, + params: dict[str, Any] | None = None, + ) -> Path: + """Download a file from the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + output_path: The path where the downloaded file should be saved. + params: The query parameters to include in the request. Optional, defaults + to None. + + Returns: + The path to the downloaded file. + + Raises: + DioptraClientError: If the output path is a directory or if creating the + output directory fails. + StatusCodeError: If the response status code is not in the 2xx range. 
+ """ + if output_path.exists() and output_path.is_dir(): + raise DioptraClientError( + f"Invalid output filepath (reason: path is a directory): {output_path}" + ) + + if not output_path.parent.exists(): + try: + output_path.parent.mkdir(parents=True) + + except OSError as err: + raise DioptraClientError( + f"Output directory creation failed (reason: {err.strerror}): " + f"{output_path.parent}" + ) from err + + kwargs: dict[str, Any] = {} + + if params: + kwargs["params"] = params + + session = self._get_requests_session() + response = session.get(self.build_url(endpoint, *parts), stream=True, **kwargs) + + if not is_2xx(response.status_code): + LOGGER.debug( + "HTTP error code returned: status_code=%s method=%s url=%s text=%s", + response.status_code, + response.request.method, + response.request.url, + response.text, + ) + raise StatusCodeError(f"Error code returned: {response.status_code}") + + with output_path.open(mode="wb") as f: + for chunk in response.iter_content(chunk_size=self.DOWNLOAD_CHUNK_SIZE): + f.write(chunk) + + return output_path + + @abstractmethod + def get(self, endpoint: str, *parts, params: dict[str, Any] | None = None) -> T: + """Make a GET request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def patch( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a PATCH request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def post( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a POST request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def delete( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a DELETE request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + @abstractmethod + def put( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> T: + """Make a PUT request to the API. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. 
+ json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + The response from the API. + """ + raise NotImplementedError + + def _get_requests_session(self) -> requests.Session: + """Get the requests Session object. + + This method will start a new session if one does not already exist. + + Returns: + The requests Session object. + + Raises: + APIConnectionError: If the session connection fails. + """ + self.connect() + + if self._session is None: + raise APIConnectionError(f"Failed to start session connection: {self.url}") + + return self._session + + +class DioptraRequestsSession(BaseDioptraRequestsSession[DioptraResponseProtocol]): + """ + The interface for communicating with the Dioptra API using the requests library. + + The responses from the HTTP methods will be requests Response objects, which follow + the DioptraResponseProtocol interface. + + Attributes: + DOWNLOAD_CHUNK_SIZE: The number of bytes to read into memory per chunk when + downloading a file from the API. + """ + + def get( + self, endpoint: str, *parts, params: dict[str, Any] | None = None + ) -> DioptraResponseProtocol: + """Make a GET request to the API. + + The response will be a requests Response object, which follows the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + + Returns: + A requests Response object. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + """ + return self._get(endpoint, *parts, params=params) + + def patch( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a PATCH request to the API. + + The response will be a requests Response object, which follows the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A requests Response object. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + """ + return self._patch(endpoint, *parts, params=params, json_=json_) + + def post( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a POST request to the API. + + The response will be a requests Response object, which follows the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A requests Response object. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. 
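+
+        A usage sketch (the URL and group id are illustrative; this mirrors how
+        ``TagsCollectionClient.create`` posts a new tag)::
+
+            response = session.post(
+                "http://localhost/api/v1/tags",
+                json_={"group": 1, "name": "example-tag"},
+            )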
+ """ + return self._post(endpoint, *parts, params=params, json_=json_) + + def delete( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a DELETE request to the API. + + The response will be a requests Response object, which follows the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A requests Response object. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + """ + return self._delete(endpoint, *parts, params=params, json_=json_) + + def put( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> DioptraResponseProtocol: + """Make a PUT request to the API. + + The response will be a requests Response object, which follows the + DioptraResponseProtocol interface. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A requests Response object. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + """ + return self._put(endpoint, *parts, params=params, json_=json_) + + +class DioptraRequestsSessionJson(BaseDioptraRequestsSession[dict[str, Any]]): + """ + The interface for communicating with the Dioptra API using the requests library. + + The responses from the HTTP methods will be JSON-like Python dictionaries. Responses + that are not in the 2xx range will raise an exception. + + Attributes: + DOWNLOAD_CHUNK_SIZE: The number of bytes to read into memory per chunk when + downloading a file from the API. + """ + + def get( + self, endpoint: str, *parts, params: dict[str, Any] | None = None + ) -> dict[str, Any]: + """Make a GET request to the API. + + The response will be a JSON-like Python dictionary. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + + Returns: + A Python dictionary containing the response data. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + JSONDecodeError: If the response data cannot be parsed as JSON. + StatusCodeError: If the response status code is not in the 2xx range. + """ + return convert_response_to_dict(self._get(endpoint, *parts, params=params)) + + def patch( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Make a PATCH request to the API. + + The response will be a JSON-like Python dictionary. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. 
+ + Returns: + A Python dictionary containing the response data. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + JSONDecodeError: If the response data cannot be parsed as JSON. + StatusCodeError: If the response status code is not in the 2xx range. + """ + return convert_response_to_dict( + self._patch(endpoint, *parts, params=params, json_=json_) + ) + + def post( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Make a POST request to the API. + + The response will be a JSON-like Python dictionary. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A Python dictionary containing the response data. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + JSONDecodeError: If the response data cannot be parsed as JSON. + StatusCodeError: If the response status code is not in the 2xx range. + """ + return convert_response_to_dict( + self._post(endpoint, *parts, params=params, json_=json_) + ) + + def delete( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Make a DELETE request to the API. + + The response will be a JSON-like Python dictionary. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A Python dictionary containing the response data. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + JSONDecodeError: If the response data cannot be parsed as JSON. + StatusCodeError: If the response status code is not in the 2xx range. + """ + return convert_response_to_dict( + self._delete(endpoint, *parts, params=params, json_=json_) + ) + + def put( + self, + endpoint: str, + *parts, + params: dict[str, Any] | None = None, + json_: dict[str, Any] | None = None, + ) -> dict[str, Any]: + """Make a PUT request to the API. + + The response will be a JSON-like Python dictionary. + + Args: + endpoint: The base URL of the API endpoint. + *parts: Additional parts to append to the base URL. + params: The query parameters to include in the request. Optional, defaults + to None. + json_: The JSON data to include in the request. Optional, defaults to None. + + Returns: + A Python dictionary containing the response data. + + Raises: + APIConnectionError: If the connection to the REST API fails. + DioptraClientError: If an unsupported method is requested. + JSONDecodeError: If the response data cannot be parsed as JSON. + StatusCodeError: If the response status code is not in the 2xx range. 
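+
+        A usage sketch (the URL and queue id are illustrative; this mirrors how
+        ``QueuesCollectionClient.modify_by_id`` renames a queue)::
+
+            response = session.put(
+                "http://localhost/api/v1/queues",
+                "1",
+                json_={"name": "new-queue-name"},
+            )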
+ """ + return convert_response_to_dict( + self._put(endpoint, *parts, params=params, json_=json_) + ) diff --git a/src/dioptra/client/snapshots.py b/src/dioptra/client/snapshots.py new file mode 100644 index 000000000..39e6b09d6 --- /dev/null +++ b/src/dioptra/client/snapshots.py @@ -0,0 +1,88 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, TypeVar + +import structlog +from structlog.stdlib import BoundLogger + +from .base import SubCollectionClient + +LOGGER: BoundLogger = structlog.stdlib.get_logger() + +T = TypeVar("T") + + +class SnapshotsSubCollectionClient(SubCollectionClient[T]): + """The client for managing a snapshots sub-collection. + + Attributes: + name: The name of the sub-collection. + """ + + name: ClassVar[str] = "snapshots" + + def get( + self, + *resource_ids: str | int, + index: int = 0, + page_length: int = 10, + search: str | None = None, + ) -> T: + """Get the list of snapshots for a given resource. + + Args: + *resource_ids: The parent resource ids that own the snapshots + sub-collection. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of snapshots to return in the paged + response. Optional, defaults to 10. + search: Search for snapshots using the Dioptra API's query language. + Optional, defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if search is not None: + params["search"] = search + + return self._session.get( + self.build_sub_collection_url(*resource_ids), params=params + ) + + def get_by_id( + self, + *resource_ids: str | int, + snapshot_id: int, + ) -> T: + """Get a snapshot by its id for a specific resource. + + Args: + *resource_ids: The parent resource ids that own the snapshots + sub-collection. + snapshot_id: The snapshot id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get( + self.build_sub_collection_url(*resource_ids), str(snapshot_id) + ) diff --git a/src/dioptra/client/tags.py b/src/dioptra/client/tags.py new file mode 100644 index 000000000..34a6645b2 --- /dev/null +++ b/src/dioptra/client/tags.py @@ -0,0 +1,268 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. 
This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, TypeVar + +from .base import CollectionClient, SubCollectionClient + +T = TypeVar("T") + + +class TagsCollectionClient(CollectionClient[T]): + """The client for interacting with the Dioptra API's /tags collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "tags" + + def get( + self, + group_id: int | None = None, + index: int = 0, + page_length: int = 10, + sort_by: str | None = None, + descending: bool | None = None, + search: str | None = None, + ) -> T: + """Get a list of tags. + + Args: + group_id: The group id the tags belong to. If None, return tags from all + groups that the user has access to. Optional, defaults to None. + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of tags to return in the paged response. + Optional, defaults to 10. + sort_by: The field to use to sort the returned list. Optional, defaults to + None. + descending: Sort the returned list in descending order. Optional, defaults + to None. + search: Search for tags using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if sort_by is not None: + params["sortBy"] = sort_by + + if descending is not None: + params["descending"] = descending + + if search is not None: + params["search"] = search + + if group_id is not None: + params["groupId"] = group_id + + return self._session.get( + self.url, + params=params, + ) + + def get_by_id(self, tag_id: str | int) -> T: + """Get the tag matching the provided id. + + Args: + tag_id: The tag id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(tag_id)) + + def create(self, group_id: int, name: str) -> T: + """Creates a tag. + + Args: + group_id: The id of the group that will own the tag. + name: The name of the new tag. + + Returns: + The response from the Dioptra API. + """ + json_ = { + "group": group_id, + "name": name, + } + + return self._session.post(self.url, json_=json_) + + def modify_by_id(self, tag_id: str | int, name: str) -> T: + """Modify the tag matching the provided id. + + Args: + tag_id: The tag id, an integer. + name: The new name of the tag. + + Returns: + The response from the Dioptra API. + """ + json_ = {"name": name} + + return self._session.put(self.url, str(tag_id), json_=json_) + + def delete_by_id(self, tag_id: str | int) -> T: + """Delete the tag matching the provided id. 
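+
+        For example, ``DELETE /api/v1/tags/1`` (an illustrative id)::
+
+            client.tags.delete_by_id(1)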
+
+        Args:
+            tag_id: The tag id, an integer.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.delete(self.url, str(tag_id))
+
+    def get_resources_for_tag(
+        self,
+        tag_id: str | int,
+        resource_type: str | None = None,
+        index: int = 0,
+        page_length: int = 10,
+    ) -> T:
+        """Get a list of resources labeled with a tag.
+
+        Args:
+            tag_id: The tag id, an integer.
+            resource_type: The type of resource to filter by. If None, return all
+                resources associated with the tag. Optional, defaults to None.
+            index: The paging index. Optional, defaults to 0.
+            page_length: The maximum number of resources to return in the paged
+                response. Optional, defaults to 10.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        params: dict[str, Any] = {
+            "index": index,
+            "pageLength": page_length,
+        }
+
+        if resource_type is not None:
+            params["resourceType"] = resource_type
+
+        return self._session.get(
+            self.url,
+            str(tag_id),
+            "resources",
+            params=params,
+        )
+
+
+class TagsSubCollectionClient(SubCollectionClient[T]):
+    """The client for managing a tags sub-collection.
+
+    Attributes:
+        name: The name of the sub-collection.
+    """
+
+    name: ClassVar[str] = "tags"
+
+    def get(self, *resource_ids: str | int) -> T:
+        """Get a list of tags.
+
+        Args:
+            *resource_ids: The parent resource ids that own the tags sub-collection.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.get(self.build_sub_collection_url(*resource_ids))
+
+    def modify(
+        self,
+        *resource_ids: str | int,
+        ids: list[int],
+    ) -> T:
+        """Change the list of tags associated with an endpoint resource.
+
+        This method overwrites the existing list of tags associated with an endpoint
+        resource. To non-destructively append multiple tags, use the ``append``
+        method. To delete an individual tag, use the ``remove`` method.
+
+        Args:
+            *resource_ids: The parent resource ids that own the tags sub-collection.
+            ids: The list of tag ids to set on the resource.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.put(
+            self.build_sub_collection_url(*resource_ids),
+            json_={"ids": ids},
+        )
+
+    def append(
+        self,
+        *resource_ids: str | int,
+        ids: list[int],
+    ) -> T:
+        """Append one or more tags to an endpoint resource.
+
+        Tag ids that have already been appended to the endpoint resource will be
+        ignored.
+
+        Args:
+            *resource_ids: The parent resource ids that own the tags sub-collection.
+            ids: The list of tag ids to append to the endpoint resource.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.post(
+            self.build_sub_collection_url(*resource_ids),
+            json_={"ids": ids},
+        )
+
+    def remove(
+        self,
+        *resource_ids: str | int,
+        tag_id: int,
+    ) -> T:
+        """Remove a tag from an endpoint resource.
+
+        Args:
+            *resource_ids: The parent resource ids that own the tags sub-collection.
+            tag_id: The id of the tag to remove from the endpoint resource.
+
+        Returns:
+            The response from the Dioptra API.
+        """
+        return self._session.delete(
+            self.build_sub_collection_url(*resource_ids), str(tag_id)
+        )
+
+    def remove_all(
+        self,
+        *resource_ids: str | int,
+    ) -> T:
+        """Remove all tags from an endpoint resource.
+
+        This method will remove all tags from the endpoint resource and cannot be
+        reversed. To remove individual tags, use the ``remove`` method.
+
+        Args:
+            *resource_ids: The parent resource ids that own the tags sub-collection.
+
+        Returns:
+            The response from the Dioptra API.
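+
+        For example, ``DELETE /api/v1/plugins/1/tags`` removes every tag from
+        plugin 1 (an illustrative id)::
+
+            client.plugins.tags.remove_all(1)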
+ """ + return self._session.delete(self.build_sub_collection_url(*resource_ids)) diff --git a/src/dioptra/client/users.py b/src/dioptra/client/users.py new file mode 100644 index 000000000..709404c8d --- /dev/null +++ b/src/dioptra/client/users.py @@ -0,0 +1,178 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from typing import Any, ClassVar, TypeVar + +from .base import CollectionClient + +T = TypeVar("T") + + +class UsersCollectionClient(CollectionClient[T]): + """The client for interacting with the Dioptra API's /users collection. + + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "users" + + def get( + self, index: int = 0, page_length: int = 10, search: str | None = None + ) -> T: + """Get a list of Dioptra users. + + Args: + index: The paging index. Optional, defaults to 0. + page_length: The maximum number of users to return in the paged response. + Optional, defaults to 10. + search: Search for users using the Dioptra API's query language. Optional, + defaults to None. + + Returns: + The response from the Dioptra API. + """ + params: dict[str, Any] = { + "index": index, + "pageLength": page_length, + } + + if search is not None: + params["search"] = search + + return self._session.get( + self.url, + params=params, + ) + + def create(self, username: str, email: str, password: str) -> T: + """Creates a Dioptra user. + + Args: + username: The username of the new user. + email: The email address of the new user. + password: The password to set for the new user. + + Returns: + The response from the Dioptra API. + """ + + return self._session.post( + self.url, + json_={ + "username": username, + "email": email, + "password": password, + "confirmPassword": password, + }, + ) + + def get_by_id(self, user_id: str | int) -> T: + """Get the user matching the provided id. + + Args: + user_id: The user id, an integer. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, str(user_id)) + + def change_password_by_id( + self, user_id: str | int, old_password: str, new_password: str + ) -> T: + """Change the password of the user matching the provided id. + + This primary use case for using this over `change_current_user_password` is if + your password has expired and you need to update it before you can log in. + + Args: + user_id: The user id, an integer. + old_password: The user's current password. The password change will fail if + this is incorrect. 
+ new_password: The new password to set for the user. + + Returns: + The response from the Dioptra API. + """ + return self._session.post( + self.url, + str(user_id), + "password", + json_={ + "oldPassword": old_password, + "newPassword": new_password, + "confirmNewPassword": new_password, + }, + ) + + def get_current(self) -> T: + """Get details about the currently logged-in user. + + Returns: + The response from the Dioptra API. + """ + return self._session.get(self.url, "current") + + def delete_current_user(self, password: str) -> T: + """Delete the currently logged-in user. + + Args: + password: The password of the currently logged-in user. The deletion will + fail if this is incorrect. + + Returns: + The response from the Dioptra API. + """ + return self._session.delete(self.url, "current", json_={"password": password}) + + def modify_current_user(self, username: str, email: str) -> T: + """Modify details about the currently logged-in user. + + Args: + username: The new username for the currently logged-in user. + email: The new email address for the currently logged-in user. + + Returns: + The response from the Dioptra API. + """ + return self._session.put( + self.url, + "current", + json_={"username": username, "email": email}, + ) + + def change_current_user_password(self, old_password: str, new_password: str) -> T: + """Change the currently logged-in user's password. + + Args: + old_password: The currently logged-in user's current password. The password + change will fail if this is incorrect. + new_password: The new password to set for the currently logged-in user. + + Returns: + The response from the Dioptra API. + """ + return self._session.post( + self.url, + "current", + "password", + json_={ + "oldPassword": old_password, + "newPassword": new_password, + "confirmNewPassword": new_password, + }, + ) diff --git a/src/dioptra/client/workflows.py b/src/dioptra/client/workflows.py new file mode 100644 index 000000000..8dfa4f6c6 --- /dev/null +++ b/src/dioptra/client/workflows.py @@ -0,0 +1,88 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +from pathlib import Path +from typing import ClassVar, Final, TypeVar + +from .base import CollectionClient, IllegalArgumentError + +T = TypeVar("T") + +JOB_FILES_DOWNLOAD: Final[str] = "jobFilesDownload" + + +class WorkflowsCollectionClient(CollectionClient[T]): + """The client for managing Dioptra's /workflows collection. 
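+
+    A usage sketch, assuming an authenticated client and an illustrative job
+    id (see ``download_job_files`` below)::
+
+        client.workflows.download_job_files(1, file_type="zip")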
+ + Attributes: + name: The name of the collection. + """ + + name: ClassVar[str] = "workflows" + + def download_job_files( + self, + job_id: str | int, + file_type: str = "tar_gz", + output_dir: Path | None = None, + file_stem: str = "job_files", + ) -> Path: + """ + Download a compressed file archive containing the files needed to execute a + submitted job. + + The downloaded file's name is the file stem followed by the file extension + corresponding to the specified file type. For example, if the file stem is + "job_files" and the file type is "tar_gz", the downloaded file will be named + "job_files.tar.gz". + + Args: + job_id: The job id, an integer. + file_type: The type of file to download. Options are "tar_gz" and "zip". + Optional, defaults to "tar_gz". + output_dir: The directory where the downloaded file should be saved. The + directory will be created if it does not exist. If None, the file will + be saved in the current working directory. Optional, defaults to None. + file_stem: The file stem to use for naming the downloaded file. Optional, + defaults to "job_files". + + Returns: + The path to the downloaded file. + + Raises: + IllegalArgumentError: If the file type is not one of "tar_gz" or "zip". + """ + file_extensions = { + "tar_gz": ".tar.gz", + "zip": ".zip", + } + + if (output_ext := file_extensions.get(file_type)) is None: + raise IllegalArgumentError( + 'Illegal value for file_type (reason: must be one of "tar_gz", "zip"): ' + f"{file_type}." + ) + + job_files_path = ( + Path(file_stem).with_suffix(output_ext) + if output_dir is None + else Path(output_dir, file_stem).with_suffix(output_ext) + ) + params = {"jobId": job_id, "fileType": file_type} + + return self._session.download( + self.url, JOB_FILES_DOWNLOAD, output_path=job_files_path, params=params + ) diff --git a/src/dioptra/restapi/db/db.py b/src/dioptra/restapi/db/db.py index 970509bd5..0b467bbb9 100644 --- a/src/dioptra/restapi/db/db.py +++ b/src/dioptra/restapi/db/db.py @@ -34,6 +34,8 @@ from sqlalchemy.ext.mutable import MutableDict from sqlalchemy.orm import DeclarativeBase, MappedAsDataclass, mapped_column +from dioptra.restapi.db.models.utils import depth_limited_repr + from .custom_types import GUID, TZDateTime intpk = Annotated[ @@ -79,10 +81,13 @@ def _set_sqlite_pragma( cursor.close() -class Base(DeclarativeBase, MappedAsDataclass): +class Base(DeclarativeBase, MappedAsDataclass, repr=False): """The base ORM class.""" metadata = metadata_obj + def __repr__(self): + return depth_limited_repr(self) + db = SQLAlchemy(model_class=Base) diff --git a/src/dioptra/restapi/db/models/utils.py b/src/dioptra/restapi/db/models/utils.py new file mode 100644 index 000000000..aed28d68a --- /dev/null +++ b/src/dioptra/restapi/db/models/utils.py @@ -0,0 +1,160 @@ +# This Software (Dioptra) is being made available as a public service by the +# National Institute of Standards and Technology (NIST), an Agency of the United +# States Department of Commerce. This software was developed in part by employees of +# NIST and in part by NIST contractors. Copyright in portions of this software that +# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant +# to Title 17 United States Code Section 105, works of NIST employees are not +# subject to copyright protection in the United States. However, NIST may hold +# international copyright in software created by its employees and domestic +# copyright (or licensing rights) in portions of software that were assigned or +# licensed to NIST. 
To the extent that NIST holds copyright in this software, it is +# being made available under the Creative Commons Attribution 4.0 International +# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts +# of the software developed or licensed by NIST. +# +# ACCESS THE FULL CC BY 4.0 LICENSE HERE: +# https://creativecommons.org/licenses/by/4.0/legalcode +import io +from collections.abc import Iterable, Mapping +from typing import Any + +import sqlalchemy as sa +import sqlalchemy.orm as sao + + +def depth_limited_repr(value: Any, max_depth: int = 2) -> str: + """ + Create a repr string for the given instance, with a depth limit to avoid + excessively large reprs. Where the repr would exceed the depth cutoff, + it includes "..." as a placeholder. + + The depth does not count depth of normal list/map-like data structures; it + counts nested instances of ORM-mapped objects. If a data structure is + passed which does not contain any ORM instances, max_depth will have no + effect and the value will be repr'd in its entirety. + + Args: + value: The value to repr + max_depth: A maximum depth + + Returns: + A repr string + """ + buf = io.StringIO() + _depth_limited_repr(value, buf, max_depth) + + return buf.getvalue() + + +def _depth_limited_repr(value: Any, out: io.TextIOBase, max_depth: int) -> None: + """ + Write a repr of the given value to the given stream. + + Args: + value: The value to repr + out: A textual output stream + max_depth: A maximum depth + """ + if isinstance(value, sao.DeclarativeBase): + + # Let depth be only applicable here; the primary use case for this + # function is to limit traversal of ORM object graphs, not arbitrary + # data structures. + if max_depth <= 0: + out.write("...") + + else: + _depth_limited_orm_object_repr(value, out, max_depth - 1) + + elif isinstance(value, Mapping): + _depth_limited_map_repr(value, out, max_depth) + + # Pretty general; will include tuples and sets too. Do we care about + # having a more "faithful" repr for them? + elif isinstance(value, Iterable) and not isinstance(value, (str, bytes, bytearray)): + _depth_limited_iter_repr(value, out, max_depth) + + else: + out.write(repr(value)) + + +def _depth_limited_orm_object_repr( + obj: sao.DeclarativeBase, out: io.TextIOBase, max_depth: int +) -> None: + """ + Write a repr of the given ORM-mapped object to the given stream. + + Args: + obj: The ORM-mapped object + out: A textual output stream + max_depth: A maximum depth + """ + state = sa.inspect(obj) + if state is None: + # Is it possible to have an instance of an ORM-mapped class, + # which SQLAlchemy does not know about? + out.write("") + + else: + out.write(type(obj).__name__) + out.write("(") + first = True + for attr_name, attr_state in state.attrs.items(): + if not first: + out.write(", ") + first = False + + out.write(attr_name) + out.write("=") + _depth_limited_repr(attr_state.value, out, max_depth) + + out.write(")") + + +def _depth_limited_iter_repr( + it: Iterable[Any], out: io.TextIOBase, max_depth: int +) -> None: + """ + Write a repr of the given iterable value to the given stream. This + function will build a bracketed list-like syntax. 
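+    For example, the tuple ``(1, 2)`` is rendered as ``[1, 2]``.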
+
+    Args:
+        it: an Iterable
+        out: A textual output stream
+        max_depth: A maximum depth
+    """
+    out.write("[")
+    first = True
+    for value in it:
+        if not first:
+            out.write(", ")
+        first = False
+
+        _depth_limited_repr(value, out, max_depth)
+
+    out.write("]")
+
+
+def _depth_limited_map_repr(mapp: Mapping, out: io.TextIOBase, max_depth: int) -> None:
+    """
+    Write a repr of the given mapping value to the given stream. This function
+    will build a JSON-like syntax with braces and key/value pairs.
+
+    Args:
+        mapp: A mapping
+        out: A textual output stream
+        max_depth: A maximum depth
+    """
+    out.write("{")
+    first = True
+    for k, v in mapp.items():
+        if not first:
+            out.write(", ")
+        first = False
+
+        out.write(repr(k))
+        out.write(": ")
+        _depth_limited_repr(v, out, max_depth)
+
+    out.write("}")
diff --git a/src/dioptra/restapi/errors.py b/src/dioptra/restapi/errors.py
index ba72c03a5..58fea2a30 100644
--- a/src/dioptra/restapi/errors.py
+++ b/src/dioptra/restapi/errors.py
@@ -20,87 +20,418 @@
 """
 from __future__ import annotations
 
+import http
+import typing
+
+import structlog
+from flask import request
 from flask_restx import Api
+from structlog.stdlib import BoundLogger
+
+LOGGER: BoundLogger = structlog.stdlib.get_logger()
+
+
+def add_attribute_values(**kwargs: typing.Any) -> list[str]:
+    """
+    Helper function that formats attribute name/value pairs as a list of phrases for
+    use in an error message.
+
+    Args:
+        kwargs: The attribute name/value pairs to format.
+
+    Returns:
+        A list of formatted attribute name/value phrases.
+    """
+    length = len(kwargs)
+
+    def sep(index: int) -> str:
+        if index == 0:
+            return " with"
+
+        if index == length - 1:
+            return ", and"
+
+        return ","
+
+    return [
+        f"{sep(index)} {key} having value ({value})"
+        for index, (key, value) in enumerate(kwargs.items())
+    ]
+
+
+class DioptraError(Exception):
+    """
+    Generic Dioptra Error.
+
+    Args:
+        message: An error-specific message to display that provides context for why
+            the error was raised.
+    """
-class BackendDatabaseError(Exception):
+    def __init__(self, message: str):
+        self.message: str = message
+
+    def to_message(self) -> str:
+        if self.__cause__ is None:
+            return self.message
+
+        if isinstance(self.__cause__, DioptraError):
+            return f"{self.message} Cause: {self.__cause__.to_message()}"
+
+        return f"{self.message} Cause: {self.__cause__}"
+
+
+class EntityDoesNotExistError(DioptraError):
+    """
+    The requested entity does not exist.
+
+    Args:
+        entity_type: the entity type name (e.g. "group" or "queue")
+        kwargs: the attribute value pairs used to request the entity
+    """
+
+    def __init__(self, entity_type: str | None = None, **kwargs: typing.Any):
+        super().__init__(
+            "".join(
+                [
+                    "Failed to locate ",
+                    "an entity" if entity_type is None else entity_type,
+                    *add_attribute_values(**kwargs),
+                    ".",
+                ]
+            )
+        )
+        self.entity_type = "unknown" if entity_type is None else entity_type
+        self.entity_attributes = kwargs
+
+
+class EntityExistsError(DioptraError):
+    """
+    The requested entity already exists.
+
+    Args:
+        entity_type: the entity type name (e.g.
"group" or "queue") + existing_id: the id of the existing entity + kwargs: the attribute value pairs used to request the entity + """ + + def __init__(self, entity_type: str, existing_id: int, **kwargs: typing.Any): + super().__init__( + "".join( + [ + f"The {entity_type}", + *add_attribute_values(**kwargs), + " is not available.", + ] + ) + ) + self.entity_type = entity_type + self.entity_attributes = kwargs + self.existing_id = existing_id + + +class LockError(DioptraError): + """ + Top-level Lock Error. + + Args: + message: a message describing the lock error + """ + + def __init__(self, message: str): + super().__init__(message) + + +class ReadOnlyLockError(LockError): + """The type has a read-only lock and cannot be modified.""" + + def __init__(self, type: str, **kwargs: typing.Any): + super().__init__( + "".join( + [ + f"The {type} type", + *add_attribute_values(**kwargs), + " has a read-only lock and cannot be modified.", + ] + ) + ) + self.entity_type = type + self.entity_attributes = kwargs + + +class BackendDatabaseError(DioptraError): """The backend database returned an unexpected response.""" + def __init__(self): + super().__init__( + "The backend database returned an unexpected response, please " + "contact the system administrator." + ) + -class SearchNotImplementedError(Exception): +class SearchNotImplementedError(DioptraError): """The search functionality has not been implemented.""" + def __init__(self): + super().__init__("The search functionality has not been implemented.") -class SearchParseError(Exception): - """The search query could not be parsed.""" +class SearchParseError(DioptraError): + """The search query could not be parsed.""" -class ResourceDoesNotExistError(Exception): - """The resource does not exist.""" + def __init__(self, context: str, error: str): + super().__init__("The provided search query could not be parsed.") + self.context = context + self.error = error -class DraftDoesNotExistError(Exception): +class DraftDoesNotExistError(DioptraError): """The requested draft does not exist.""" + def __init__(self, **kwargs: typing.Any): + super().__init__( + "".join( + [ + "The requested draft", + *add_attribute_values(**kwargs), + " does not exist.", + ] + ) + ) + self.entity_attributes = kwargs + -class DraftAlreadyExistsError(Exception): +class DraftAlreadyExistsError(DioptraError): """The draft already exists.""" + def __init__(self, type: str, id: int): + super().__init__(f"A draft for a [{type}] with id: {id} already exists.") + self.resource_type = type + self.resource_id = id -def register_base_error_handlers(api: Api) -> None: - @api.errorhandler(BackendDatabaseError) - def handle_backend_database_error(error): - return { - "message": "The backend database returned an unexpected response, please " - "contact the system administrator" - }, 500 - @api.errorhandler(SearchNotImplementedError) - def handle_search_not_implemented_error(error): - return {"message": "The search functionality has not been implemented"}, 501 +class SortParameterValidationError(DioptraError): + """The sort parameters are not valid.""" - @api.errorhandler(SearchParseError) - def handle_search_parse_error(error): - return { - "message": "The provided search query could not be parsed", - "query": error.args[0], - "reason": error.args[1], - }, 422 + def __init__(self, type: str, column: str, **kwargs): + super().__init__(f"The sort parameter, {column}, for {type} is not sortable.") - @api.errorhandler(ResourceDoesNotExistError) - def handle_resource_does_not_exist(error): - return 
{"message": "Not Found - The requested resource does not exist"}, 404 - @api.errorhandler(DraftDoesNotExistError) - def handle_draft_does_not_exist(error): - return {"message": "Not Found - The requested draft does not exist"}, 404 +class QueryParameterValidationError(DioptraError): + """Input parameters failed validation.""" - @api.errorhandler(DraftAlreadyExistsError) - def handle_draft_already_exists(error): - return ( - {"message": "Bad Request - The draft for this resource already exists."}, - 400, + def __init__(self, type: str, constraint: str, **kwargs): + super().__init__( + "".join( + [ + f"Input parameters for {type} failed {constraint} check", + *add_attribute_values(**kwargs), + ".", + ] + ) + ) + self.resource_type = type + self.constraint = constraint + self.parameters = kwargs + + +class QueryParameterNotUniqueError(QueryParameterValidationError): + """Query Parameters failed unique validatation check.""" + + def __init__(self, type: str, **kwargs): + super().__init__(type, "unique", **kwargs) + + +class JobInvalidStatusTransitionError(DioptraError): + """The requested status transition is invalid.""" + + def __init__(self): + super().__init__("The requested job status update is invalid.") + + +class JobInvalidParameterNameError(DioptraError): + """The requested job parameter name is invalid.""" + + def __init__(self): + super().__init__( + "A provided job parameter name does not match any entrypoint " "parameters." + ) + + +class JobMlflowRunAlreadySetError(DioptraError): + """The requested job already has an mlflow run id set.""" + + def __init__(self): + super().__init__( + "The requested job already has an mlflow run id set. It may " + "not be changed." ) -def register_error_handlers(api: Api) -> None: +class EntityDependencyError(DioptraError): + """ + Base Error for dependency problems between entities. + + Args: + message: a message describing the dependecy error + """ + + def __init__(self, message: str): + super().__init__(message) + + +class EntityNotRegisteredError(DioptraError): + """ + An entity could not be located based on a relationship with another entity. + + Args: + parent_type: the parent or owning type of the relation + parent_id: the parent or owning id of the relation + child_type: the child or dependent type of the relation + child_id: the child or dependent id of the relation + """ + + def __init__( + self, parent_type: str, parent_id: int, child_type: str, child_id: int + ): + super().__init__( + f"The requested {child_type} having id ({child_id}) is not registered to " + f"the {parent_type} having id ({parent_id})." + ) + self.parent_type = parent_type + self.parent_id = parent_id + self.child_type = child_type + self.child_id = child_id + + +class PluginParameterTypeMatchesBuiltinTypeError(DioptraError): + """The plugin parameter type name cannot match a built-in type.""" + + def __init__(self): + super().__init__( + "The requested plugin parameter type name matches a built-in " + "type. Please select another and resubmit." 
+        )
+
+
+# User Errors
+class NoCurrentUserError(DioptraError):
+    """There is no currently logged-in user."""
+
+    def __init__(self):
+        super().__init__("There is no currently logged-in user.")
+
+
+class UserPasswordChangeError(DioptraError):
+    """Password change failed."""
+
+    def __init__(self, message: str):
+        super().__init__(message)
+
+
+class UserPasswordError(DioptraError):
+    """Password Error."""
+
+    def __init__(self, message: str):
+        super().__init__(message)
+
+
+def error_result(
+    error: DioptraError, status: http.HTTPStatus, detail: dict[str, typing.Any]
+) -> tuple[dict[str, typing.Any], int]:
+    return {
+        "error": error.__class__.__name__,
+        "message": f"{status.phrase} - {error.message}",
+        "detail": detail,
+        "originating_path": request.full_path,
+    }, status.value
+
+
+# Silenced Complexity error for this function since it is a straightforward
+# registration of error handlers
+def register_error_handlers(api: Api, **kwargs) -> None:  # noqa: C901
     """Registers the error handlers with the main application.
 
     Args:
         api: The main REST |Api| object.
     """
+    log: BoundLogger = kwargs.get("log", LOGGER.new())
+
+    @api.errorhandler(EntityDoesNotExistError)
+    def handle_resource_does_not_exist_error(error: EntityDoesNotExistError):
+        log.debug(
+            "Entity not found", entity_type=error.entity_type, **error.entity_attributes
+        )
+        return error_result(
+            error,
+            http.HTTPStatus.NOT_FOUND,
+            {"entity_type": error.entity_type, **error.entity_attributes},
+        )
+
+    @api.errorhandler(EntityExistsError)
+    def handle_entity_exists_error(error: EntityExistsError):
+        log.debug(
+            "Entity exists",
+            entity_type=error.entity_type,
+            existing_id=error.existing_id,
+            **error.entity_attributes,
+        )
+        return error_result(
+            error,
+            http.HTTPStatus.CONFLICT,
+            {
+                "entity_type": error.entity_type,
+                "existing_id": error.existing_id,
+                "entity_attributes": {**error.entity_attributes},
+            },
+        )
+
+    @api.errorhandler(BackendDatabaseError)
+    def handle_backend_database_error(error: BackendDatabaseError):
+        log.error(error.to_message())
+        return error_result(error, http.HTTPStatus.INTERNAL_SERVER_ERROR, {})
+
+    @api.errorhandler(SearchNotImplementedError)
+    def handle_search_not_implemented_error(error: SearchNotImplementedError):
+        log.debug(error.to_message())
+        return error_result(error, http.HTTPStatus.NOT_IMPLEMENTED, {})
+
+    @api.errorhandler(SearchParseError)
+    def handle_search_parse_error(error: SearchParseError):
+        log.debug(error.to_message())
+        return error_result(
+            error,
+            http.HTTPStatus.UNPROCESSABLE_ENTITY,
+            {"query": error.context, "reason": error.error},
+        )
+
+    @api.errorhandler(DraftDoesNotExistError)
+    def handle_draft_does_not_exist(error: DraftDoesNotExistError):
+        log.debug(error.to_message())
+        return error_result(error, http.HTTPStatus.NOT_FOUND, {})
+
+    @api.errorhandler(DraftAlreadyExistsError)
+    def handle_draft_already_exists(error: DraftAlreadyExistsError):
+        log.debug(error.to_message())
+        return error_result(error, http.HTTPStatus.BAD_REQUEST, {})
+
+    @api.errorhandler(LockError)
+    def handle_lock_error(error: LockError):
+        log.debug(error.to_message())
+        return error_result(error, http.HTTPStatus.FORBIDDEN, {})
+
+    @api.errorhandler(NoCurrentUserError)
+    def handle_no_current_user_error(error: NoCurrentUserError):
+        log.debug(error.to_message())
+        return error_result(error, http.HTTPStatus.UNAUTHORIZED, {})
+
+    @api.errorhandler(UserPasswordChangeError)
+    def handle_password_change_error(error: UserPasswordChangeError):
+        log.debug(error.to_message())
+        return 
error_result(error, http.HTTPStatus.FORBIDDEN, {}) + + @api.errorhandler(UserPasswordError) + def handle_user_password_error(error: UserPasswordError): + log.debug(error.to_message()) + return error_result(error, http.HTTPStatus.UNAUTHORIZED, {}) - from dioptra.restapi import v1 - - register_base_error_handlers(api) - v1.artifacts.errors.register_error_handlers(api) - v1.entrypoints.errors.register_error_handlers(api) - v1.experiments.errors.register_error_handlers(api) - v1.groups.errors.register_error_handlers(api) - v1.jobs.errors.register_error_handlers(api) - v1.models.errors.register_error_handlers(api) - v1.plugin_parameter_types.errors.register_error_handlers(api) - v1.plugins.errors.register_error_handlers(api) - v1.queues.errors.register_error_handlers(api) - v1.tags.errors.register_error_handlers(api) - v1.users.errors.register_error_handlers(api) + @api.errorhandler(DioptraError) + def handle_base_error(error: DioptraError): + log.debug(error.to_message()) + return error_result(error, http.HTTPStatus.BAD_REQUEST, {}) diff --git a/src/dioptra/restapi/utils.py b/src/dioptra/restapi/utils.py index 5208cf124..ecdbb5f0f 100644 --- a/src/dioptra/restapi/utils.py +++ b/src/dioptra/restapi/utils.py @@ -25,6 +25,7 @@ import datetime import functools +from collections import Counter from importlib.resources import as_file, files from typing import Any, Callable, List, Protocol, Type, cast @@ -325,3 +326,26 @@ def setup_injection(api: Api, injector: Injector) -> None: ma.URL: str, ma.UUID: str, } + + +# Validation Functions +def find_non_unique(name: str, parameters: list[dict[str, Any]]) -> list[str]: + """ + Finds all values of a key that are not unique in a list of dictionaries. + Useful for checking that a provided input satisfies uniqueness constraints. + + Note that the key name must be in every dictionary of the provided list. + + Args: + name: the name of the parameter to check + parameters: the input parameters to check + + Returns: + A list of all values that were provided more than once, or an empty list if all + values of the key were unique + """ + name_count: Counter = Counter() + # this line fails if a parameter is missing a "name" value + name_count.update([parameter[name] for parameter in parameters]) + # create a list of all name values that appear more than once + return [key for key in name_count.keys() if name_count[key] > 1] diff --git a/src/dioptra/restapi/v1/artifacts/__init__.py b/src/dioptra/restapi/v1/artifacts/__init__.py index 11ce655e6..ab0a41a34 100644 --- a/src/dioptra/restapi/v1/artifacts/__init__.py +++ b/src/dioptra/restapi/v1/artifacts/__init__.py @@ -14,6 +14,3 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from . 
import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/artifacts/service.py b/src/dioptra/restapi/v1/artifacts/service.py index 29e08a564..96cc99f5e 100644 --- a/src/dioptra/restapi/v1/artifacts/service.py +++ b/src/dioptra/restapi/v1/artifacts/service.py @@ -26,18 +26,17 @@ from structlog.stdlib import BoundLogger from dioptra.restapi.db import db, models -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + SortParameterValidationError, +) from dioptra.restapi.v1 import utils from dioptra.restapi.v1.groups.service import GroupIdService from dioptra.restapi.v1.jobs.service import ExperimentJobIdService, JobIdService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import ( - ArtifactAlreadyExistsError, - ArtifactDoesNotExistError, - ArtifactSortError, -) - LOGGER: BoundLogger = structlog.stdlib.get_logger() RESOURCE_TYPE: Final[str] = "artifact" @@ -97,14 +96,14 @@ def create( The newly created artifact object. Raises: - ArtifactAlreadyExistsError: If the artifact already exists. + EntityExistsError: If the artifact already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) - if self._artifact_uri_service.get(uri, log=log) is not None: - log.debug("Artifact uri already exists", uri=uri) - raise ArtifactAlreadyExistsError + duplicate = self._artifact_uri_service.get(uri, log=log) + if duplicate is not None: + raise EntityExistsError(RESOURCE_TYPE, duplicate.resource_id, uri=uri) job_dict = cast( utils.JobDict, @@ -220,8 +219,7 @@ def get( sort_column = sort_column.asc() latest_artifacts_stmt = latest_artifacts_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in SORTABLE_FIELDS: - log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS") - raise ArtifactSortError + raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string) artifacts = db.session.scalars(latest_artifacts_stmt).all() @@ -310,7 +308,7 @@ def get( artifact_uri: str, error_if_not_found: bool = False, **kwargs, - ) -> utils.ArtifactDict | None: + ) -> models.Artifact | None: """Fetch an artifact by its unique uri. Args: @@ -323,7 +321,7 @@ def get( The artifact object if found, otherwise None. Raises: - ArtifactDoesNotExistError: If the artifact is not found and + EntityDoesNotExistError: If the artifact is not found and `error_if_not_found` is True. """ @@ -345,8 +343,7 @@ def get( if artifact is None: if error_if_not_found: - log.debug("Artifact not found", artifact_uri=artifact_uri) - raise ArtifactDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, artifact_uri=artifact_uri) return None @@ -373,7 +370,7 @@ def get( The artifact object if found, otherwise None. Raises: - ArtifactDoesNotExistError: If the artifact is not found and + EntityDoesNotExistError: If the artifact is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -393,8 +390,7 @@ def get( if artifact is None: if error_if_not_found: - log.debug("Artifact not found", artifact_id=artifact_id) - raise ArtifactDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, artifact_id=artifact_id) return None @@ -433,9 +429,9 @@ def modify( The updated artifact object. Raises: - ArtifactDoesNotExistError: If the artifact is not found and + EntityDoesNotExistError: If the artifact is not found and `error_if_not_found` is True. 
- ArtifactAlreadyExistsError: If the artifact name already exists. + EntityExistsError: If the artifact name already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) diff --git a/src/dioptra/restapi/v1/entrypoints/__init__.py b/src/dioptra/restapi/v1/entrypoints/__init__.py index 11ce655e6..ab0a41a34 100644 --- a/src/dioptra/restapi/v1/entrypoints/__init__.py +++ b/src/dioptra/restapi/v1/entrypoints/__init__.py @@ -14,6 +14,3 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/entrypoints/errors.py b/src/dioptra/restapi/v1/entrypoints/errors.py deleted file mode 100644 index 907983092..000000000 --- a/src/dioptra/restapi/v1/entrypoints/errors.py +++ /dev/null @@ -1,77 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the entrypoint endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class EntrypointAlreadyExistsError(Exception): - """The entrypoint name already exists.""" - - -class EntrypointDoesNotExistError(Exception): - """The requested entrypoint does not exist.""" - - -class EntrypointPluginDoesNotExistError(Exception): - """The requested plugin does not exist for the entrypoint.""" - - -class EntrypointParameterNamesNotUniqueError(Exception): - """Multiple entrypoint parameters share the same name.""" - - -class EntrypointSortError(Exception): - """The requested sortBy column is not a sortable field.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(EntrypointDoesNotExistError) - def handle_entrypoint_does_not_exist_error(error): - return {"message": "Not Found - The requested entrypoint does not exist"}, 404 - - @api.errorhandler(EntrypointPluginDoesNotExistError) - def handle_entrypoint_plugin_does_not_exist_error(error): - return { - "message": "Not Found - The requested plugin does not exist for this " - "entrypoint" - }, 404 - - @api.errorhandler(EntrypointAlreadyExistsError) - def handle_entrypoint_already_exists_error(error): - return ( - { - "message": "Bad Request - The entrypoint name on the registration form " - "already exists. Please select another and resubmit." 
-            },
-            400,
-        )
-
-    @api.errorhandler(EntrypointParameterNamesNotUniqueError)
-    def handle_entrypoint_parameter_names_not_unique_error(error):
-        return {
-            "message": "Bad Request - The entrypoint contains multiple parameters "
-            "with the same name."
-        }, 400
-
-    @api.errorhandler(EntrypointSortError)
-    def handle_queue_sort_error(error):
-        return (
-            {"message": "Bad Request - This column can not be sorted."},
-            400,
-        )
diff --git a/src/dioptra/restapi/v1/entrypoints/service.py b/src/dioptra/restapi/v1/entrypoints/service.py
index bc0af723d..a8f3cf711 100644
--- a/src/dioptra/restapi/v1/entrypoints/service.py
+++ b/src/dioptra/restapi/v1/entrypoints/service.py
@@ -27,23 +27,23 @@
 from dioptra.restapi.db import db, models
 from dioptra.restapi.db.models.constants import resource_lock_types
-from dioptra.restapi.errors import BackendDatabaseError
+from dioptra.restapi.errors import (
+    BackendDatabaseError,
+    EntityDoesNotExistError,
+    EntityExistsError,
+    QueryParameterNotUniqueError,
+    SortParameterValidationError,
+)
+from dioptra.restapi.utils import find_non_unique
 from dioptra.restapi.v1 import utils
 from dioptra.restapi.v1.groups.service import GroupIdService
 from dioptra.restapi.v1.plugins.service import PluginIdsService
-from dioptra.restapi.v1.queues.errors import QueueDoesNotExistError
+from dioptra.restapi.v1.queues.service import RESOURCE_TYPE as QUEUE_RESOURCE_TYPE
 from dioptra.restapi.v1.queues.service import QueueIdsService
 from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters
 
-from .errors import (
-    EntrypointAlreadyExistsError,
-    EntrypointDoesNotExistError,
-    EntrypointParameterNamesNotUniqueError,
-    EntrypointPluginDoesNotExistError,
-    EntrypointSortError,
-)
-
 LOGGER: BoundLogger = structlog.stdlib.get_logger()
 
+PLUGIN_RESOURCE_TYPE: Final[str] = "entry_point_plugin"
 RESOURCE_TYPE: Final[str] = "entry_point"
 
 SEARCHABLE_FIELDS: Final[dict[str, Any]] = {
@@ -110,17 +110,15 @@ def create(
             The newly created entrypoint object.
 
         Raises:
-            EntrypointAlreadyExistsError: If a entrypoint with the given name already
-                exists.
+            EntityExistsError: If an entrypoint with the given name already exists.
         """
         log: BoundLogger = kwargs.get("log", LOGGER.new())
 
-        if (
-            self._entrypoint_name_service.get(name, group_id=group_id, log=log)
-            is not None
-        ):
-            log.debug("Entrypoint name already exists", name=name, group_id=group_id)
-            raise EntrypointAlreadyExistsError
+        duplicate = self._entrypoint_name_service.get(name, group_id=group_id, log=log)
+        if duplicate is not None:
+            raise EntityExistsError(
+                RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id
+            )
 
         group = self._group_id_service.get(group_id, error_if_not_found=True)
         queues = self._queue_ids_service.get(queue_ids, error_if_not_found=True)
@@ -263,8 +261,7 @@ def get(
                 sort_column = sort_column.asc()
             entrypoints_stmt = entrypoints_stmt.order_by(sort_column)
         elif sort_by_string and sort_by_string not in SORTABLE_FIELDS:
-            log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS")
-            raise EntrypointSortError
+            raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string)
 
         entrypoints = list(db.session.scalars(entrypoints_stmt).unique().all())
 
@@ -344,7 +341,7 @@ def get(
             The entrypoint object if found, otherwise None.
 
         Raises:
-            EntrypointDoesNotExistError: If the entrypoint is not found and
+            EntityDoesNotExistError: If the entrypoint is not found and
                 `error_if_not_found` is True.
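+
+        For example, `get(99, error_if_not_found=True)` raises
+        `EntityDoesNotExistError` with `entrypoint_id=99` recorded in the
+        error's `entity_attributes` when no entrypoint with that id exists.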
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -364,8 +361,9 @@ def get( if entrypoint is None: if error_if_not_found: - log.debug("Entrypoint not found", entrypoint_id=entrypoint_id) - raise EntrypointDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, entrypoint_id=entrypoint_id + ) return None @@ -420,15 +418,16 @@ def modify( The updated entrypoint object. Raises: - EntrypointDoesNotExistError: If the entrypoint is not found and + EntityDoesNotExistError: If the entrypoint is not found and `error_if_not_found` is True. - EntrypointAlreadyExistsError: If the entrypoint name already exists. + EntityExistsError: If the entrypoint name already exists. + QueryParameterNotUniqueError: If the values for the "name" parameter in the + parameters list is not unique """ log: BoundLogger = kwargs.get("log", LOGGER.new()) - - parameter_names = [parameter["name"] for parameter in parameters] - if len(parameter_names) > len(set(parameter_names)): - raise EntrypointParameterNamesNotUniqueError + duplicates = find_non_unique("name", parameters) + if len(duplicates) > 0: + raise QueryParameterNotUniqueError(RESOURCE_TYPE, name=duplicates) entrypoint_dict = self.get( entrypoint_id, error_if_not_found=error_if_not_found, log=log @@ -439,13 +438,14 @@ def modify( entrypoint = entrypoint_dict["entry_point"] group_id = entrypoint.resource.group_id - if ( - name != entrypoint.name - and self._entrypoint_name_service.get(name, group_id=group_id, log=log) - is not None - ): - log.debug("Entrypoint name already exists", name=name, group_id=group_id) - raise EntrypointAlreadyExistsError + if name != entrypoint.name: + duplicate = self._entrypoint_name_service.get( + name, group_id=group_id, log=log + ) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id + ) queues = self._queue_ids_service.get(queue_ids, error_if_not_found=True) @@ -517,7 +517,7 @@ def delete(self, entrypoint_id: int, **kwargs) -> dict[str, Any]: entrypoint_resource = db.session.scalars(stmt).first() if entrypoint_resource is None: - raise EntrypointDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, entrypoint_id=entrypoint_id) deleted_resource_lock = models.ResourceLock( resource_lock_type=resource_lock_types.DELETE, @@ -567,7 +567,7 @@ def get( The plugin snapshots for the entrypoint. Raises: - EntrypointDoesNotExistError: If the entrypoint is not found. + EntityDoesNotExistError: If the entrypoint is not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) log.debug("Get entrypoint by id", entrypoint_id=entrypoint_id) @@ -600,8 +600,8 @@ def append( The updated entrypoint object. Raises: - EntrypointDoesNotExistError: If the entrypoint is not found. - EntrypointAlreadyExistsError: If the entrypoint name already exists. + EntityDoesNotExistError: If the entrypoint is not found. + EntityExistsError: If the entrypoint name already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -642,7 +642,11 @@ def append( for plugin_file in plugin["plugin_files"] ] existing_entry_point_plugin_files = [ - entry_point_plugin_file + models.EntryPointPluginFile( + entry_point=new_entrypoint, + plugin=entry_point_plugin_file.plugin, + plugin_file=entry_point_plugin_file.plugin_file, + ) for entry_point_plugin_file in entrypoint.entry_point_plugin_files if entry_point_plugin_file.plugin.resource_id not in set(plugin_ids) ] @@ -711,7 +715,7 @@ def get( The plugin snapshots for the entrypoint. 
Raises: - EntrypointDoesNotExistError: If the entrypoint is not found. + EntityDoesNotExistError: If the entrypoint or plugin is not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) log.debug("Get entrypoint by id", entrypoint_id=entrypoint_id) @@ -728,7 +732,9 @@ def get( for entry_point_plugin_file in entrypoint.entry_point_plugin_files } if plugin_id not in plugins: - raise EntrypointPluginDoesNotExistError + raise EntityDoesNotExistError( + PLUGIN_RESOURCE_TYPE, entrypoint_id=entrypoint_id, plugin_id=plugin_id + ) plugin = utils.PluginWithFilesDict( plugin=plugins[plugin_id], plugin_files=[], has_draft=None @@ -757,8 +763,7 @@ def delete( A dictionary reporting the status of the request. Raises: - EntrypointDoesNotExistError: If the entrypoint is not found. - EntrypointPluginDoesNotExistError: If the plugin is not found. + EntityDoesNotExistError: If the entrypoint or plugin is not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -773,19 +778,34 @@ def delete( plugin.plugin.resource_id for plugin in entrypoint.entry_point_plugin_files ) if plugin_id not in plugin_ids: - raise EntrypointPluginDoesNotExistError + raise EntityDoesNotExistError( + PLUGIN_RESOURCE_TYPE, entrypoint_id=entrypoint_id, plugin_id=plugin_id + ) # create a new snapshot with the plugin removed + entrypoint_parameters = [ + models.EntryPointParameter( + name=param.name, + default_value=param.default_value, + parameter_type=param.parameter_type, + parameter_number=param.parameter_number, + ) + for param in entrypoint.parameters + ] new_entrypoint = models.EntryPoint( name=entrypoint.name, description=entrypoint.description, task_graph=entrypoint.task_graph, - parameters=entrypoint.parameters, + parameters=entrypoint_parameters, resource=entrypoint.resource, creator=current_user, ) new_entrypoint.entry_point_plugin_files = [ - entry_point_plugin_file + models.EntryPointPluginFile( + entry_point=new_entrypoint, + plugin=entry_point_plugin_file.plugin, + plugin_file=entry_point_plugin_file.plugin_file, + ) for entry_point_plugin_file in entrypoint.entry_point_plugin_files if entry_point_plugin_file.plugin.resource_id != plugin_id ] @@ -830,7 +850,7 @@ def get( The entrypoint object if found, otherwise None. Raises: - EntrypointDoesNotExistError: If the entrypoint is not found and + EntityDoesNotExistError: If the entrypoint is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -852,10 +872,9 @@ def get( entrypoint_ids_missing = set(entrypoint_ids) - set( entrypoint.resource_id for entrypoint in entrypoints ) - log.debug( - "Entrypoint not found", entrypoint_ids=list(entrypoint_ids_missing) + raise EntityDoesNotExistError( + RESOURCE_TYPE, entrypoint_ids=list(entrypoint_ids_missing) ) - raise EntrypointDoesNotExistError return entrypoints @@ -896,7 +915,7 @@ def get( The list of plugins. Raises: - EntrypointDoesNotExistError: If the entrypoint is not found and + EntityDoesNotExistError: If the entrypoint is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -933,9 +952,9 @@ def append( The updated list of queues resource objects. Raises: - EntrypointDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. - QueueDoesNotExistError: If one or more queues are not found. + EntityDoesNotExistError: If one or more queues are not found. 
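+
+        In both cases the `entity_type` and `entity_attributes` carried by the
+        error identify which lookup failed.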
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) log.debug( @@ -960,11 +979,6 @@ def append( new_queues = self._queue_ids_service.get( list(new_queue_ids), error_if_not_found=True, log=log ) - if error_if_not_found and len(new_queues) != len(new_queue_ids): - found_queue_ids = set(queue.resource_id for queue in new_queues) - missing_queue_ids = new_queue_ids - found_queue_ids - log.debug(queue_ids=list(missing_queue_ids)) - raise QueueDoesNotExistError entrypoint.children.extend([queue.resource for queue in new_queues]) @@ -995,9 +1009,9 @@ def modify( The updated queue resource object. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. - QueueDoesNotExistError: If one or more queues are not found. + EntityDoesNotExistError: If one or more queues are not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -1109,7 +1123,7 @@ def delete(self, entrypoint_id: int, queue_id, **kwargs) -> dict[str, Any]: removed_queue = queue_resources.pop(queue_id, None) if removed_queue is None: - raise QueueDoesNotExistError + raise EntityDoesNotExistError(QUEUE_RESOURCE_TYPE, queue_id=queue_id) plugin_resources = [ resource @@ -1146,7 +1160,7 @@ def get( The entrypoint object if found, otherwise None. Raises: - EntrypointDoesNotExistError: If the entrypoint is not found and + EntityDoesNotExistError: If the entrypoint is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -1167,8 +1181,9 @@ def get( if entrypoint is None: if error_if_not_found: - log.debug("Entrypoint not found", name=name) - raise EntrypointDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, name=name, group_id=group_id + ) return None diff --git a/src/dioptra/restapi/v1/experiments/__init__.py b/src/dioptra/restapi/v1/experiments/__init__.py index 006f5798f..99936763a 100644 --- a/src/dioptra/restapi/v1/experiments/__init__.py +++ b/src/dioptra/restapi/v1/experiments/__init__.py @@ -15,6 +15,3 @@ # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode """The experiments endpoint subpackage.""" -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/experiments/errors.py b/src/dioptra/restapi/v1/experiments/errors.py deleted file mode 100644 index 5b626c030..000000000 --- a/src/dioptra/restapi/v1/experiments/errors.py +++ /dev/null @@ -1,53 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the experiment endpoints.""" -from flask_restx import Api - - -class ExperimentAlreadyExistsError(Exception): - """The experiment name already exists.""" - - -class ExperimentDoesNotExistError(Exception): - """The requested experiment does not exist.""" - - -class ExperimentSortError(Exception): - """The requested sortBy column is not a sortable field.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(ExperimentAlreadyExistsError) - def handle_experiment_already_exists_error(error): - return ( - { - "message": "Bad Request - The experiment name on the registration form " - "already exists. Please select another and resubmit." - }, - 400, - ) - - @api.errorhandler(ExperimentDoesNotExistError) - def handle_experiment_does_not_exist_error(error): - return {"message": "Not Found - The requested experiment does not exist"}, 404 - - @api.errorhandler(ExperimentSortError) - def handle_queue_sort_error(error): - return ( - {"message": "Bad Request - This column can not be sorted."}, - 400, - ) diff --git a/src/dioptra/restapi/v1/experiments/service.py b/src/dioptra/restapi/v1/experiments/service.py index aec8e9f9a..df1d1f2ee 100644 --- a/src/dioptra/restapi/v1/experiments/service.py +++ b/src/dioptra/restapi/v1/experiments/service.py @@ -27,19 +27,20 @@ from dioptra.restapi.db import db, models from dioptra.restapi.db.models.constants import resource_lock_types -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + SortParameterValidationError, +) from dioptra.restapi.v1 import utils -from dioptra.restapi.v1.entrypoints.errors import EntrypointDoesNotExistError +from dioptra.restapi.v1.entrypoints.service import ( + RESOURCE_TYPE as ENTRYPOINT_RESOURCE_TYPE, +) from dioptra.restapi.v1.entrypoints.service import EntrypointIdsService from dioptra.restapi.v1.groups.service import GroupIdService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import ( - ExperimentAlreadyExistsError, - ExperimentDoesNotExistError, - ExperimentSortError, -) - LOGGER: BoundLogger = structlog.stdlib.get_logger() RESOURCE_TYPE: Final[str] = "experiment" @@ -103,17 +104,16 @@ def create( The newly created experiment object. Raises: - ExperimentAlreadyExistsError: If an experiment with the given name already + EntityExistsError: If an experiment with the given name already exists. 
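+                The error carries the `resource_id` of the existing experiment
+                as `existing_id`, along with the conflicting `name` and
+                `group_id` in its `entity_attributes`.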
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) - if ( - self._experiment_name_service.get(name, group_id=group_id, log=log) - is not None - ): - log.debug("Experiment name already exists", name=name, group_id=group_id) - raise ExperimentAlreadyExistsError + duplicate = self._experiment_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id + ) group = self._group_id_service.get(group_id, error_if_not_found=True) entrypoints = ( @@ -237,8 +237,7 @@ def get( sort_column = sort_column.asc() experiments_stmt = experiments_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in SORTABLE_FIELDS: - log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS") - raise ExperimentSortError + raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string) experiments = list(db.session.scalars(experiments_stmt).all()) @@ -320,7 +319,7 @@ def get( The experiment object if found, otherwise none. Raises: - ExperimentDoesNotExistError: If the experiment is not found and if + EntityDoesNotExistError: If the experiment is not found and if `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -340,8 +339,9 @@ def get( if experiment is None: if error_if_not_found: - log.debug("Experiment not found", experiment_id=experiment_id) - raise ExperimentDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, experiment_id=experiment_id + ) return None @@ -398,9 +398,9 @@ def modify( The updated experiment object. Raises: - ExperimentDoesNotExistError: If the experiment is not found and + EntityDoesNotExistError: If the experiment is not found and `error_if_not_found` is True. - ExperimentAlreadyExistsError: If the experiment name already exists. + EntityExistsError: If the experiment name already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -413,13 +413,14 @@ def modify( experiment = experiment_dict["experiment"] group_id = experiment.resource.group_id - if ( - name != experiment.name - and self._experiment_name_service.get(name, group_id=group_id, log=log) - is not None - ): - log.debug("Experiment name already exists", name=name, group_id=group_id) - raise ExperimentAlreadyExistsError + if name != experiment.name: + duplicate = self._experiment_name_service.get( + name, group_id=group_id, log=log + ) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id + ) entrypoints = self._entrypoint_ids_service.get( entrypoint_ids, error_if_not_found=True, log=log @@ -466,7 +467,7 @@ def delete(self, experiment_id: int, **kwargs) -> dict[str, Any]: experiment_resource = db.session.scalars(stmt).first() if experiment_resource is None: - raise ExperimentDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, experiment_id=experiment_id) deleted_resource_lock = models.ResourceLock( resource_lock_type=resource_lock_types.DELETE, @@ -515,7 +516,7 @@ def get( The list of plugins. Raises: - ExperimentDoesNotExistError: If the experiment is not found and + EntityDoesNotExistError: If the experiment is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -553,9 +554,9 @@ def append( The updated list of entrypoints resource objects. 
Raises: - ExperimentDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the experiment is not found and `error_if_not_found` is True. - EntrypointDoesNotExistError: If one or more entrypoints are not found. + EntityDoesNotExistError: If one or more entrypoints are not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) log.debug( @@ -584,8 +585,9 @@ def append( entrypoint.resource_id for entrypoint in new_entrypoints ) missing_entrypoint_ids = new_entrypoint_ids - found_entrypoint_ids - log.debug(entrypoint_ids=list(missing_entrypoint_ids)) - raise EntrypointDoesNotExistError + raise EntityDoesNotExistError( + ENTRYPOINT_RESOURCE_TYPE, entrypoint_ids=list(missing_entrypoint_ids) + ) experiment.children.extend( [entrypoint.resource for entrypoint in new_entrypoints] @@ -620,9 +622,9 @@ def modify( The updated entrypoint resource object. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. - EntrypointDoesNotExistError: If one or more entrypoints are not found. + EntityDoesNotExistError: If one or more entrypoints are not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -727,7 +729,11 @@ def delete(self, experiment_id: int, entrypoint_id, **kwargs) -> dict[str, Any]: removed_entrypoint = entrypoint_resources.pop(entrypoint_id, None) if removed_entrypoint is None: - raise EntrypointDoesNotExistError + raise EntityDoesNotExistError( + ENTRYPOINT_RESOURCE_TYPE, + experiment_id=experiment_id, + entrypoint_id=entrypoint_id, + ) experiment.children = list(entrypoint_resources.values()) @@ -763,7 +769,7 @@ def get( The experiment object if found, otherwise None. Raises: - ExperimentDoesNotExistError: If the experiment is not found and + EntityDoesNotExistError: If the experiment is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -784,8 +790,9 @@ def get( if experiment is None: if error_if_not_found: - log.debug("Experiment not found", name=name) - raise ExperimentDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, experiment_name=name, group_id=group_id + ) return None diff --git a/src/dioptra/restapi/v1/groups/__init__.py b/src/dioptra/restapi/v1/groups/__init__.py index 9d5cefcdb..e80d1a337 100644 --- a/src/dioptra/restapi/v1/groups/__init__.py +++ b/src/dioptra/restapi/v1/groups/__init__.py @@ -15,6 +15,3 @@ # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode """The groups endpoint subpackage.""" -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/groups/errors.py b/src/dioptra/restapi/v1/groups/errors.py deleted file mode 100644 index 3d7449852..000000000 --- a/src/dioptra/restapi/v1/groups/errors.py +++ /dev/null @@ -1,41 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. 
However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the group endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class GroupNameNotAvailableError(Exception): - """The group name is not available.""" - - -class GroupDoesNotExistError(Exception): - """The requested group does not exist.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(GroupDoesNotExistError) - def handle_user_does_not_exist_error(error): - return {"message": "Not Found - The requested group does not exist"}, 404 - - @api.errorhandler(GroupNameNotAvailableError) - def handle_no_current_user_error(error): - return ( - {"message": "Bad Request - The group name is not available"}, - 400, - ) diff --git a/src/dioptra/restapi/v1/groups/service.py b/src/dioptra/restapi/v1/groups/service.py index 09109bac3..271b9d6ea 100644 --- a/src/dioptra/restapi/v1/groups/service.py +++ b/src/dioptra/restapi/v1/groups/service.py @@ -27,11 +27,13 @@ from dioptra.restapi.db import db, models from dioptra.restapi.db.models.constants import group_lock_types -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, +) from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import GroupDoesNotExistError, GroupNameNotAvailableError - LOGGER: BoundLogger = structlog.stdlib.get_logger() DEFAULT_GROUP_MEMBER_PERMISSIONS: Final[dict[str, bool]] = { @@ -58,6 +60,8 @@ "name": lambda x: models.Group.name.like(x, escape="/"), } +GROUP_TYPE: Final[str] = "group" + class GroupService(object): """The service methods used for creating and managing groups.""" @@ -104,9 +108,9 @@ def create( """ log: BoundLogger = kwargs.get("log", LOGGER.new()) - if self._group_name_service.get(name) is not None: - log.debug("Group name already exists", name=name) - raise GroupNameNotAvailableError + duplicate = self._group_name_service.get(name) + if duplicate is not None: + raise EntityExistsError("group", duplicate.group_id, name=name) new_group = models.Group(name=name, creator=creator) self._group_member_service.create( @@ -222,7 +226,7 @@ def get( The group object if found, otherwise None. Raises: - UserDoesNotExistError: If the group is not found and `error_if_not_found` + EntityDoesNotExistError: If the group is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -233,8 +237,7 @@ def get( if group is None: if error_if_not_found: - log.debug("Group not found", group_id=group_id) - raise GroupDoesNotExistError + raise EntityDoesNotExistError(GROUP_TYPE, group_id=group_id) return None @@ -261,7 +264,7 @@ def modify( The group object. Raises: - GroupDoesNotExistError: If the group is not found and `error_if_not_found` + EntityDoesNotExistError: If the group is not found and `error_if_not_found` is True. 
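+            EntityExistsError: If the new group name is already in use by
+                another group.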
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -272,14 +275,13 @@ def modify( if group is None: if error_if_not_found: - log.debug("Group not found", group_id=group_id) - raise GroupDoesNotExistError + raise EntityDoesNotExistError(GROUP_TYPE, group_id=group_id) return None - if self._group_name_service.get(name, log=log) is not None: - log.debug("Group name already exists", name=name) - raise GroupNameNotAvailableError + duplicate = self._group_name_service.get(name, log=log) + if duplicate is not None: + raise EntityExistsError(GROUP_TYPE, duplicate.group_id, name=name) current_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) group.last_modified_on = current_timestamp @@ -307,7 +309,7 @@ def delete(self, group_id: int, **kwargs) -> dict[str, Any]: group = db.session.scalars(stmt).first() if group is None: - raise GroupDoesNotExistError + raise EntityDoesNotExistError(GROUP_TYPE, group_id=group_id) name = group.name @@ -339,7 +341,7 @@ def get( The group object if found, otherwise None. Raises: - GroupDoesNotExistError: If the group is not found and `error_if_not_found` + EntityDoesNotExistError: If the group is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -350,8 +352,7 @@ def get( if group is None: if error_if_not_found: - log.debug("Group not found", name=name) - raise GroupDoesNotExistError + raise EntityDoesNotExistError(GROUP_TYPE, name=name) return None diff --git a/src/dioptra/restapi/v1/jobs/__init__.py b/src/dioptra/restapi/v1/jobs/__init__.py index 11ce655e6..ab0a41a34 100644 --- a/src/dioptra/restapi/v1/jobs/__init__.py +++ b/src/dioptra/restapi/v1/jobs/__init__.py @@ -14,6 +14,3 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/jobs/errors.py b/src/dioptra/restapi/v1/jobs/errors.py deleted file mode 100644 index 48a984042..000000000 --- a/src/dioptra/restapi/v1/jobs/errors.py +++ /dev/null @@ -1,105 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the job endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class JobDoesNotExistError(Exception): - """The requested job does not exist.""" - - -class JobInvalidStatusTransitionError(Exception): - """The requested status transition is invalid.""" - - -class JobInvalidParameterNameError(Exception): - """The requested job parameter name is invalid.""" - - -class JobMlflowRunAlreadySetError(Exception): - """The requested job already has an mlflow run id set.""" - - -class ExperimentJobDoesNotExistError(Exception): - """The requested experiment job does not exist.""" - - -class EntryPointNotRegisteredToExperimentError(Exception): - """The requested entry point is not registered to the provided experiment.""" - - -class QueueNotRegisteredToEntryPointError(Exception): - """The requested queue is not registered to the provided entry point.""" - - -class JobSortError(Exception): - """The requested sortBy column is not a sortable field.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(JobDoesNotExistError) - def handle_job_does_not_exist_error(error): - return {"message": "Not Found - The requested job does not exist"}, 404 - - @api.errorhandler(JobInvalidStatusTransitionError) - def handle_job_invalid_status_transition_error(error): - return { - "message": "Bad Request - The requested job status update is invalid" - }, 400 - - @api.errorhandler(JobInvalidParameterNameError) - def handle_job_invalid_parameter_name_error(error): - return { - "message": "Bad Request - A provided job parameter name does not match any " - "entrypoint parameters" - }, 400 - - @api.errorhandler(JobMlflowRunAlreadySetError) - def handle_job_mlflow_run_already_set_error(error): - return { - "message": "Bad Request - The requested job already has an mlflow run id " - "set. It may not be changed." 
- }, 400 - - @api.errorhandler(ExperimentJobDoesNotExistError) - def handle_experiment_job_does_not_exist_error(error): - return { - "message": "Not Found - The requested experiment job does not exist" - }, 404 - - @api.errorhandler(EntryPointNotRegisteredToExperimentError) - def handle_entry_point_not_registered_to_experiment_error(error): - return { - "message": "Bad Request - The requested entry point is not registered to " - "the provided experiment" - }, 400 - - @api.errorhandler(QueueNotRegisteredToEntryPointError) - def handle_queue_not_registered_to_entry_point_error(error): - return { - "message": "Bad Request - The requested queue is not registered to the " - "provided entry point" - }, 400 - - @api.errorhandler(JobSortError) - def handle_queue_sort_error(error): - return ( - {"message": "Bad Request - This column can not be sorted."}, - 400, - ) diff --git a/src/dioptra/restapi/v1/jobs/service.py b/src/dioptra/restapi/v1/jobs/service.py index 8592d85ca..81a6168bf 100644 --- a/src/dioptra/restapi/v1/jobs/service.py +++ b/src/dioptra/restapi/v1/jobs/service.py @@ -27,26 +27,30 @@ from structlog.stdlib import BoundLogger from dioptra.restapi.db import db, models -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityNotRegisteredError, + JobInvalidParameterNameError, + JobInvalidStatusTransitionError, + JobMlflowRunAlreadySetError, + SortParameterValidationError, +) from dioptra.restapi.v1 import utils +from dioptra.restapi.v1.entrypoints.service import ( + RESOURCE_TYPE as ENTRYPOINT_RESOURCE_TYPE, +) from dioptra.restapi.v1.entrypoints.service import EntrypointIdService +from dioptra.restapi.v1.experiments.service import ( + RESOURCE_TYPE as EXPERIMENT_RESOURCE_TYPE, +) from dioptra.restapi.v1.experiments.service import ExperimentIdService from dioptra.restapi.v1.groups.service import GroupIdService +from dioptra.restapi.v1.queues.service import RESOURCE_TYPE as QUEUE_RESOURCE_TYPE from dioptra.restapi.v1.queues.service import QueueIdService from dioptra.restapi.v1.shared.rq_service import RQServiceV1 from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import ( - EntryPointNotRegisteredToExperimentError, - ExperimentJobDoesNotExistError, - JobDoesNotExistError, - JobInvalidParameterNameError, - JobInvalidStatusTransitionError, - JobMlflowRunAlreadySetError, - JobSortError, - QueueNotRegisteredToEntryPointError, -) - LOGGER: BoundLogger = structlog.stdlib.get_logger() RESOURCE_TYPE: Final[str] = "job" @@ -125,7 +129,7 @@ def create( The newly created job object. Raises: - JobAlreadyExistsError: If a job with the given name already exists. + EntityExistsError: If a job with the given name already exists. 
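+            EntityNotRegisteredError: If the entrypoint is not registered to the
+                experiment, or the queue is not registered to the entrypoint.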
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -162,12 +166,12 @@ def create( ) if entrypoint_id not in set(experiment_entry_point_ids): - log.debug( - "Entry point not registered to experiment", - entrypoint_id=entrypoint_id, - experiment_id=experiment_id, + raise EntityNotRegisteredError( + EXPERIMENT_RESOURCE_TYPE, + experiment_id, + ENTRYPOINT_RESOURCE_TYPE, + entrypoint_id, ) - raise EntryPointNotRegisteredToExperimentError # Validate that the provided queue_id is registered to the entrypoint parent_entry_point = aliased(models.EntryPoint) @@ -190,12 +194,9 @@ def create( ) if queue_id not in set(entry_point_queue_ids): - log.debug( - "Queue not registered to entry point", - queue_id=queue_id, - entrypoint_id=entrypoint_id, + raise EntityNotRegisteredError( + ENTRYPOINT_RESOURCE_TYPE, entrypoint_id, QUEUE_RESOURCE_TYPE, queue_id ) - raise QueueNotRegisteredToEntryPointError # Fetch the validated queue queue_dict = cast( @@ -359,8 +360,7 @@ def get( sort_column = sort_column.asc() jobs_stmt = jobs_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in SORTABLE_FIELDS: - log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS") - raise JobSortError + raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string) jobs = list(db.session.scalars(jobs_stmt).all()) @@ -408,7 +408,7 @@ def get( The job object if found, otherwise None. Raises: - JobDoesNotExistError: If the job is not found and `error_if_not_found` + EntityDoesNotExistError: If the job is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -427,8 +427,7 @@ def get( if job is None: if error_if_not_found: - log.debug("Job not found", job_id=job_id) - raise JobDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, job_id=job_id) return None @@ -465,7 +464,7 @@ def delete(self, job_id: int, **kwargs) -> dict[str, Any]: job_resource = db.session.scalars(stmt).first() if job_resource is None: - raise JobDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, job_id=job_id) deleted_resource_lock = models.ResourceLock( resource_lock_type="delete", @@ -523,8 +522,7 @@ def get( job = db.session.scalars(stmt).first() if job is None: - log.debug("Job not found", job_id=job_id) - raise JobDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, job_id=job_id) return {"status": job.status, "id": job.resource_id} @@ -609,7 +607,7 @@ def get( query. Raises: - ExperimentDoesNotExistError: If the experiment is not found. + EntityDoesNotExistError: If the experiment is not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) log.debug("Get full list of jobs for experiment", experiment_id=experiment_id) @@ -674,8 +672,7 @@ def get( sort_column = sort_column.asc() jobs_stmt = jobs_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in SORTABLE_FIELDS: - log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS") - raise JobSortError + raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string) jobs = list(db.session.scalars(jobs_stmt).all()) @@ -721,7 +718,7 @@ def get(self, experiment_id: int, job_id: int, **kwargs) -> utils.JobDict: The job object if found, otherwise None. Raises: - ExperimentJobDoesNotExistError: If the job associated with the experiment + EntityDoesNotExistError: If the job associated with the experiment is not found. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -733,12 +730,9 @@ def get(self, experiment_id: int, job_id: int, **kwargs) -> utils.JobDict: experiment_job = db.session.scalar(experiment_job_stmt) if experiment_job is None: - log.debug( - "Experiment Job not found", - job_id=job_id, - experiment_id=experiment_id, + raise EntityDoesNotExistError( + RESOURCE_TYPE, job_id=job_id, experiment_id=experiment_id ) - raise ExperimentJobDoesNotExistError return cast( utils.JobDict, @@ -768,12 +762,9 @@ def delete(self, experiment_id: int, job_id: int, **kwargs) -> dict[str, Any]: experiment_job = db.session.scalar(experiment_job_stmt) if experiment_job is None: - log.debug( - "Job associated with experiment not found", - job_id=job_id, - experiment_id=experiment_id, + raise EntityDoesNotExistError( + RESOURCE_TYPE, job_id=job_id, experiment_id=experiment_id ) - raise ExperimentJobDoesNotExistError return self._job_id_service.delete( job_id=job_id, diff --git a/src/dioptra/restapi/v1/models/__init__.py b/src/dioptra/restapi/v1/models/__init__.py index 11ce655e6..ab0a41a34 100644 --- a/src/dioptra/restapi/v1/models/__init__.py +++ b/src/dioptra/restapi/v1/models/__init__.py @@ -14,6 +14,3 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/models/errors.py b/src/dioptra/restapi/v1/models/errors.py deleted file mode 100644 index ef9da6579..000000000 --- a/src/dioptra/restapi/v1/models/errors.py +++ /dev/null @@ -1,65 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the model endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class ModelAlreadyExistsError(Exception): - """The model name already exists.""" - - -class ModelDoesNotExistError(Exception): - """The requested model does not exist.""" - - -class ModelVersionDoesNotExistError(Exception): - """The requested version of the model does not exist.""" - - -class ModelSortError(Exception): - """The requested sortBy column is not a sortable field.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(ModelDoesNotExistError) - def handle_model_does_not_exist_error(error): - return {"message": "Not Found - The requested model does not exist"}, 404 - - @api.errorhandler(ModelVersionDoesNotExistError) - def handle_model_version_does_not_exist_error(error): - return { - "message": "Not Found - The requested model version does not exist" - }, 404 - - @api.errorhandler(ModelAlreadyExistsError) - def handle_model_already_exists_error(error): - return ( - { - "message": "Bad Request - The model name on the registration form " - "already exists. Please select another and resubmit." - }, - 400, - ) - - @api.errorhandler(ModelSortError) - def handle_queue_sort_error(error): - return ( - {"message": "Bad Request - This column can not be sorted."}, - 400, - ) diff --git a/src/dioptra/restapi/v1/models/service.py b/src/dioptra/restapi/v1/models/service.py index e295efa42..7bfd6bf18 100644 --- a/src/dioptra/restapi/v1/models/service.py +++ b/src/dioptra/restapi/v1/models/service.py @@ -27,19 +27,17 @@ from structlog.stdlib import BoundLogger from dioptra.restapi.db import db, models -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + SortParameterValidationError, +) from dioptra.restapi.v1 import utils from dioptra.restapi.v1.artifacts.service import ArtifactIdService from dioptra.restapi.v1.groups.service import GroupIdService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import ( - ModelAlreadyExistsError, - ModelDoesNotExistError, - ModelSortError, - ModelVersionDoesNotExistError, -) - LOGGER: BoundLogger = structlog.stdlib.get_logger() MODEL_RESOURCE_TYPE: Final[str] = "ml_model" @@ -100,13 +98,15 @@ def create( The newly created model object. Raises: - ModelAlreadyExistsError: If a model with the given name already exists. + EntityExistsError: If a model with the given name already exists. 
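The create hunk below is the first of several occurrences of the same duplicate-name idiom (models, plugins, plugin files, queues, tags): fetch by name first, and if a row comes back, raise with the existing row's id so the response can point at the collision. Distilled into a hypothetical helper, with all parameter names invented here:

def ensure_name_available(name_service, resource_type, name, group_id, log):
    """Raise EntityExistsError if `name` is already taken in `group_id`.

    Hypothetical helper distilling the duplicate check this commit repeats
    across the models, plugins, queues, and tags services.
    """
    duplicate = name_service.get(name, group_id=group_id, log=log)
    if duplicate is not None:
        raise EntityExistsError(
            resource_type, duplicate.resource_id, name=name, group_id=group_id
        )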
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) - if self._model_name_service.get(name, group_id=group_id, log=log) is not None: - log.debug("Model name already exists", name=name, group_id=group_id) - raise ModelAlreadyExistsError + duplicate = self._model_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + MODEL_RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id + ) group = self._group_id_service.get(group_id, error_if_not_found=True) @@ -220,10 +220,7 @@ def get( sort_column = sort_column.asc() latest_ml_models_stmt = latest_ml_models_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in MODEL_SORTABLE_FIELDS: - log.debug( - f"sort_by_string: '{sort_by_string}' is not in MODEL_SORTABLE_FIELDS" - ) - raise ModelSortError + raise SortParameterValidationError(MODEL_RESOURCE_TYPE, sort_by_string) ml_models = db.session.scalars(latest_ml_models_stmt).all() @@ -326,7 +323,7 @@ def get( The model object if found, otherwise None. Raises: - ModelDoesNotExistError: If the model is not found and `error_if_not_found` + EntityDoesNotExistError: If the model is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -346,8 +343,7 @@ def get( if ml_model is None: if error_if_not_found: - log.debug("Model not found", model_id=model_id) - raise ModelDoesNotExistError + raise EntityDoesNotExistError(MODEL_RESOURCE_TYPE, model_id=model_id) return None @@ -404,9 +400,9 @@ def modify( The updated model object. Raises: - ModelDoesNotExistError: If the model is not found and `error_if_not_found` + EntityDoesNotExistError: If the model is not found and `error_if_not_found` is True. - ModelAlreadyExistsError: If the model name already exists. + EntityExistsError: If the model name already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -415,9 +411,6 @@ def modify( ) if ml_model_dict is None: - if error_if_not_found: - raise ModelDoesNotExistError - return None ml_model = ml_model_dict["ml_model"] @@ -425,13 +418,15 @@ def modify( has_draft = ml_model_dict["has_draft"] group_id = ml_model.resource.group_id - if ( - name != ml_model.name - and self._model_name_service.get(name, group_id=group_id, log=log) - is not None - ): - log.debug("Model name already exists", name=name, group_id=group_id) - raise ModelAlreadyExistsError + if name != ml_model.name: + duplicate = self._model_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + MODEL_RESOURCE_TYPE, + duplicate.resource_id, + name=name, + group_id=group_id, + ) new_ml_model = models.MlModel( name=name, @@ -473,7 +468,7 @@ def delete(self, model_id: int, **kwargs) -> dict[str, Any]: model_resource = db.session.scalars(stmt).first() if model_resource is None: - raise ModelDoesNotExistError + raise EntityDoesNotExistError(MODEL_RESOURCE_TYPE, model_id=model_id) deleted_resource_lock = models.ResourceLock( resource_lock_type="delete", @@ -594,7 +589,7 @@ def get( None. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -705,7 +700,7 @@ def get( The requested version the resource object if found, otherwise None. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -735,8 +730,11 @@ def get( if latest_version is None: if error_if_not_found: - log.debug("Model version not found", version_number=version_number) - raise ModelVersionDoesNotExistError + raise EntityDoesNotExistError( + MODEL_VERSION_RESOURCE_TYPE, + model_id=model_id, + version_number=version_number, + ) return None @@ -766,9 +764,9 @@ def modify( The updated model object. Raises: - ModelDoesNotExistError: If the model is not found and `error_if_not_found` + EntityDoesNotExistError: If the model is not found and `error_if_not_found` is True. - ModelAlreadyExistsError: If the model name already exists. + EntityExistsError: If the model name already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -827,7 +825,7 @@ def get( The model object if found, otherwise None. Raises: - ModelDoesNotExistError: If the model is not found and `error_if_not_found` + EntityDoesNotExistError: If the model is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -848,8 +846,9 @@ def get( if ml_model is None: if error_if_not_found: - log.debug("Model not found", name=name) - raise ModelDoesNotExistError + raise EntityDoesNotExistError( + MODEL_RESOURCE_TYPE, name=name, group_id=group_id + ) return None diff --git a/src/dioptra/restapi/v1/plugin_parameter_types/__init__.py b/src/dioptra/restapi/v1/plugin_parameter_types/__init__.py index 11ce655e6..ab0a41a34 100644 --- a/src/dioptra/restapi/v1/plugin_parameter_types/__init__.py +++ b/src/dioptra/restapi/v1/plugin_parameter_types/__init__.py @@ -14,6 +14,3 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/plugin_parameter_types/errors.py b/src/dioptra/restapi/v1/plugin_parameter_types/errors.py deleted file mode 100644 index e45772f31..000000000 --- a/src/dioptra/restapi/v1/plugin_parameter_types/errors.py +++ /dev/null @@ -1,95 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the plugin parameter type endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class PluginParameterTypeMatchesBuiltinTypeError(Exception): - """The plugin parameter type name cannot match a built-in type.""" - - -class PluginParameterTypeAlreadyExistsError(Exception): - """The plugin parameter type name already exists.""" - - -class PluginParameterTypeDoesNotExistError(Exception): - """The requested plugin parameter type does not exist.""" - - -class PluginParameterTypeReadOnlyLockError(Exception): - """The plugin parameter type has a read-only lock and cannot be modified.""" - - -class PluginParameterTypeMissingParameterError(Exception): - """The requested plugin parameter type is missing a required parameter.""" - - -class PluginParameterSortError(Exception): - """The requested sortBy column is not a sortable field.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(PluginParameterTypeMatchesBuiltinTypeError) - def handle_plugin_parameter_type_matches_builtin_type_error(error): - return { - "message": "Bad Request - The requested plugin parameter type name " - "matches a built-in type. Please select another and resubmit." - }, 400 - - @api.errorhandler(PluginParameterTypeDoesNotExistError) - def handle_plugin_parameter_type_does_not_exist_error(error): - return { - "message": "Not Found - The requested plugin parameter type does " - "not exist" - }, 404 - - @api.errorhandler(PluginParameterTypeReadOnlyLockError) - def handle_plugin_parameter_type_read_only_lock_error(error): - return { - "message": "Forbidden - The plugin parameter type has a read-only " - "lock and cannot be modified." - }, 403 - - @api.errorhandler(PluginParameterTypeMissingParameterError) - def handle_plugin_parameter_type_missing_parameter_error(error): - return ( - { - "message": "Forbidden - The requested plugin parameter " - "type is missing a required parameter." - }, - 400, - ) - - @api.errorhandler(PluginParameterTypeAlreadyExistsError) - def handle_plugin_parameter_type_already_exists_error(error): - return ( - { - "message": "Bad Request - The plugin parameter type name on " - "the registration form already exists. Please select " - "another and resubmit." 
- }, - 400, - ) - - @api.errorhandler(PluginParameterSortError) - def handle_queue_sort_error(error): - return ( - {"message": "Bad Request - This column can not be sorted."}, - 400, - ) diff --git a/src/dioptra/restapi/v1/plugin_parameter_types/service.py b/src/dioptra/restapi/v1/plugin_parameter_types/service.py index 28e3c1cd7..e4ce40fa8 100644 --- a/src/dioptra/restapi/v1/plugin_parameter_types/service.py +++ b/src/dioptra/restapi/v1/plugin_parameter_types/service.py @@ -25,20 +25,19 @@ from dioptra.restapi.db import db, models from dioptra.restapi.db.models.constants import resource_lock_types -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + PluginParameterTypeMatchesBuiltinTypeError, + ReadOnlyLockError, + SortParameterValidationError, +) from dioptra.restapi.v1 import utils from dioptra.restapi.v1.groups.service import GroupIdService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters from dioptra.task_engine.type_registry import BUILTIN_TYPES -from .errors import ( - PluginParameterSortError, - PluginParameterTypeAlreadyExistsError, - PluginParameterTypeDoesNotExistError, - PluginParameterTypeMatchesBuiltinTypeError, - PluginParameterTypeReadOnlyLockError, -) - LOGGER: BoundLogger = structlog.stdlib.get_logger() RESOURCE_TYPE: Final[str] = "plugin_task_parameter_type" @@ -107,8 +106,8 @@ def create( Raises: PluginParameterTypeMatchesBuiltinTypeError: If the plugin parameter type name matches a built-in type. - PluginParameterTypeAlreadyExistsError: If a plugin parameter type - with the given name already exists. + EntityExistsError: If a plugin parameter type with the given name already + exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -120,18 +119,13 @@ def create( ) raise PluginParameterTypeMatchesBuiltinTypeError - if ( - self._plugin_parameter_type_name_service.get( - name, group_id=group_id, log=log - ) - is not None - ): - log.debug( - "Plugin Parameter Type name already exists", - name=name, - group_id=group_id, + duplicate = self._plugin_parameter_type_name_service.get( + name, group_id=group_id, log=log + ) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id ) - raise PluginParameterTypeAlreadyExistsError group = self._group_id_service.get(group_id, error_if_not_found=True) @@ -247,8 +241,7 @@ def get( sort_column ) elif sort_by_string and sort_by_string not in SORTABLE_FIELDS: - log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS") - raise PluginParameterSortError + raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string) plugin_parameter_types = list( db.session.scalars(plugin_parameter_types_stmt).all() @@ -319,7 +312,7 @@ def get( The plugin parameter type object if found, otherwise None. Raises: - PluginParameterTypeDoesNotExistError: If the plugin parameter type + EntityDoesNotExistError: If the plugin parameter type is not found and `error_if_not_found` is True. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -342,11 +335,9 @@ def get( if plugin_parameter_type is None: if error_if_not_found: - log.debug( - "Plugin Parameter Type not found", - plugin_parameter_type_id=plugin_parameter_type_id, + raise EntityDoesNotExistError( + RESOURCE_TYPE, plugin_parameter_type_id=plugin_parameter_type_id ) - raise PluginParameterTypeDoesNotExistError return None @@ -393,9 +384,9 @@ def modify( The updated plugin parameter type object. Raises: - PluginParameterTypeDoesNotExistError: If the plugin parameter type + EntityDoesNotExistError: If the plugin parameter type is not found and `error_if_not_found` is True. - PluginParameterTypeAlreadyExistsError: If the plugin parameter type + EntityExistsError: If the plugin parameter type name already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -411,12 +402,11 @@ def modify( group_id = plugin_parameter_type.resource.group_id if plugin_parameter_type.resource.is_readonly: - log.debug( - "The Plugin Parameter Type is read-only and cannot be modified", + raise ReadOnlyLockError( + RESOURCE_TYPE, plugin_parameter_type_id=plugin_parameter_type_id, name=plugin_parameter_type.name, ) - raise PluginParameterTypeReadOnlyLockError if name.strip().lower() in BUILTIN_TYPES: log.debug( @@ -426,19 +416,14 @@ def modify( ) raise PluginParameterTypeMatchesBuiltinTypeError - if ( - name != plugin_parameter_type.name - and self._plugin_parameter_type_name_service.get( + if name != plugin_parameter_type.name: + duplicate = self._plugin_parameter_type_name_service.get( name, group_id=group_id, log=log ) - is not None - ): - log.debug( - "Plugin Parameter Type name already exists", - name=name, - group_id=group_id, - ) - raise PluginParameterTypeAlreadyExistsError + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id + ) new_plugin_parameter_type = models.PluginTaskParameterType( name=name, @@ -482,14 +467,14 @@ def delete(self, plugin_parameter_type_id: int, **kwargs) -> dict[str, Any]: plugin_parameter_type_resource = db.session.scalars(stmt).first() if plugin_parameter_type_resource is None: - raise PluginParameterTypeDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, plugin_parameter_type_id=plugin_parameter_type_id + ) if plugin_parameter_type_resource.is_readonly: - log.debug( - "The Plugin Parameter Type is read-only and cannot be deleted", - plugin_parameter_type_id=plugin_parameter_type_id, + raise ReadOnlyLockError( + RESOURCE_TYPE, plugin_parameter_type_id=plugin_parameter_type_id ) - raise PluginParameterTypeReadOnlyLockError deleted_resource_lock = models.ResourceLock( resource_lock_type=resource_lock_types.DELETE, @@ -530,7 +515,7 @@ def get( The plugin parameter type object if found, otherwise None. Raises: - PluginParameterTypeDoesNotExistError: If the plugin parameter type + EntityDoesNotExistError: If the plugin parameter type is not found and `error_if_not_found` is True. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -555,8 +540,9 @@ def get( if plugin_parameter_type is None: if error_if_not_found: - log.debug("Plugin Parameter Type not found", name=name) - raise PluginParameterTypeDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, name=name, group_id=group_id + ) return None diff --git a/src/dioptra/restapi/v1/plugins/__init__.py b/src/dioptra/restapi/v1/plugins/__init__.py index 8b612d5e8..ab0a41a34 100644 --- a/src/dioptra/restapi/v1/plugins/__init__.py +++ b/src/dioptra/restapi/v1/plugins/__init__.py @@ -14,7 +14,3 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode - -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/plugins/errors.py b/src/dioptra/restapi/v1/plugins/errors.py deleted file mode 100644 index 4358d9c58..000000000 --- a/src/dioptra/restapi/v1/plugins/errors.py +++ /dev/null @@ -1,121 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. 
-# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the plugin endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class PluginAlreadyExistsError(Exception): - """The plugin name already exists.""" - - -class PluginDoesNotExistError(Exception): - """The requested plugin does not exist.""" - - -class PluginFileAlreadyExistsError(Exception): - """The plugin file filename already exists.""" - - -class PluginFileDoesNotExistError(Exception): - """The requested plugin file does not exist.""" - - -class PluginTaskParameterTypeNotFoundError(Exception): - """One or more referenced plugin task parameter types were not found.""" - - -class PluginTaskNameAlreadyExistsError(Exception): - """More than one plugin task is being assigned the same name.""" - - -class PluginTaskInputParameterNameAlreadyExistsError(Exception): - """More than one plugin task input parameter is being assigned the same name.""" - - -class PluginTaskOutputParameterNameAlreadyExistsError(Exception): - """More than one plugin task output parameter is being assigned the same name.""" - - -class PluginSortError(Exception): - """The requested sortBy column is not a sortable field.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(PluginDoesNotExistError) - def handle_plugin_does_not_exist_error(error): - return {"message": "Not Found - The requested plugin does not exist"}, 404 - - @api.errorhandler(PluginAlreadyExistsError) - def handle_plugin_already_exists_error(error): - return ( - { - "message": "Bad Request - The plugin name on the registration form " - "already exists. Please select another and resubmit." - }, - 400, - ) - - @api.errorhandler(PluginFileDoesNotExistError) - def handle_plugin_file_does_not_exist_error(error): - return {"message": "Not Found - The requested plugin file does not exist"}, 404 - - @api.errorhandler(PluginFileAlreadyExistsError) - def handle_plugin_file_already_exists_error(error): - return ( - { - "message": "Bad Request - The plugin file filename on the " - "registration form already exists. Please select another and resubmit." - }, - 400, - ) - - @api.errorhandler(PluginTaskParameterTypeNotFoundError) - def handle_plugin_task_parameter_type_not_found_error(error): - return { - "message": "Bad Request - One or more referenced plugin task parameter " - "types were not found." - }, 400 - - @api.errorhandler(PluginTaskNameAlreadyExistsError) - def handle_plugin_task_name_already_exists_error(error): - return { - "message": "Bad Request - More than one plugin task is being assigned the " - "same name." - }, 400 - - @api.errorhandler(PluginTaskInputParameterNameAlreadyExistsError) - def handle_plugin_task_input_parameter_name_already_exists_error(error): - return { - "message": "Bad Request - More than one plugin task input parameter is " - "being assigned the same name." - }, 400 - - @api.errorhandler(PluginTaskOutputParameterNameAlreadyExistsError) - def handle_plugin_task_output_parameter_name_already_exists_error(error): - return { - "message": "Bad Request - More than one plugin task output parameter is " - "being assigned the same name." 
- }, 400 - - @api.errorhandler(PluginSortError) - def handle_queue_sort_error(error): - return ( - {"message": "Bad Request - This column can not be sorted."}, - 400, - ) diff --git a/src/dioptra/restapi/v1/plugins/service.py b/src/dioptra/restapi/v1/plugins/service.py index 9849f186c..fadb12ec4 100644 --- a/src/dioptra/restapi/v1/plugins/service.py +++ b/src/dioptra/restapi/v1/plugins/service.py @@ -29,23 +29,18 @@ from dioptra.restapi.db import db, models from dioptra.restapi.db.models.constants import resource_lock_types -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + QueryParameterNotUniqueError, + SortParameterValidationError, +) +from dioptra.restapi.utils import find_non_unique from dioptra.restapi.v1 import utils from dioptra.restapi.v1.groups.service import GroupIdService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import ( - PluginAlreadyExistsError, - PluginDoesNotExistError, - PluginFileAlreadyExistsError, - PluginFileDoesNotExistError, - PluginSortError, - PluginTaskInputParameterNameAlreadyExistsError, - PluginTaskNameAlreadyExistsError, - PluginTaskOutputParameterNameAlreadyExistsError, - PluginTaskParameterTypeNotFoundError, -) - LOGGER: BoundLogger = structlog.stdlib.get_logger() PLUGIN_RESOURCE_TYPE: Final[str] = "plugin" @@ -105,13 +100,18 @@ def create( The newly created plugin object. Raises: - PluginAlreadyExistsError: If a plugin with the given name already exists. + EntityExistsError: If a plugin with the given name already exists. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) - if self._plugin_name_service.get(name, group_id=group_id, log=log) is not None: - log.debug("Plugin name already exists", name=name, group_id=group_id) - raise PluginAlreadyExistsError + duplicate = self._plugin_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + PLUGIN_RESOURCE_TYPE, + duplicate.resource_id, + name=name, + group_id=group_id, + ) group = self._group_id_service.get(group_id, error_if_not_found=True) @@ -222,10 +222,7 @@ def get( sort_column = sort_column.asc() latest_plugins_stmt = latest_plugins_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in PLUGIN_SORTABLE_FIELDS: - log.debug( - f"sort_by_string: '{sort_by_string}' is not in PLUGIN_SORTABLE_FIELDS" - ) - raise PluginSortError + raise SortParameterValidationError(PLUGIN_RESOURCE_TYPE, sort_by_string) plugins = db.session.scalars(latest_plugins_stmt).all() @@ -327,7 +324,7 @@ def get( The plugin object if found, otherwise None. Raises: - PluginDoesNotExistError: If the plugin is not found and `error_if_not_found` + EntityDoesNotExistError: If the plugin is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -347,8 +344,7 @@ def get( if plugin is None: if error_if_not_found: - log.debug("Plugin not found", plugin_id=plugin_id) - raise PluginDoesNotExistError + raise EntityDoesNotExistError(PLUGIN_RESOURCE_TYPE, plugin_id=plugin_id) return None @@ -403,9 +399,9 @@ def modify( The updated plugin object. Raises: - PluginDoesNotExistError: If the plugin is not found and `error_if_not_found` + EntityDoesNotExistError: If the plugin is not found and `error_if_not_found` is True. - PluginAlreadyExistsError: If the plugin name already exists. + EntityExistsError: If the plugin name already exists. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -420,13 +416,15 @@ def modify( plugin_files = plugin_dict["plugin_files"] group_id = plugin.resource.group_id - if ( - name != plugin.name - and self._plugin_name_service.get(name, group_id=group_id, log=log) - is not None - ): - log.debug("Plugin name already exists", name=name, group_id=group_id) - raise PluginAlreadyExistsError + if name != plugin.name: + duplicate = self._plugin_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + PLUGIN_RESOURCE_TYPE, + duplicate.resource_id, + name=name, + group_id=group_id, + ) new_plugin = models.Plugin( name=name, @@ -466,7 +464,7 @@ def delete(self, plugin_id: int, **kwargs) -> dict[str, Any]: plugin_resource = db.session.scalar(stmt) if plugin_resource is None: - raise PluginDoesNotExistError + raise EntityDoesNotExistError(PLUGIN_RESOURCE_TYPE, plugin_id=plugin_id) deleted_resource_lock = models.ResourceLock( resource_lock_type=resource_lock_types.DELETE, @@ -499,11 +497,9 @@ def get( A list of plugin objects. Raises: - PluginDoesNotExistError: If the plugin is not found and `error_if_not_found` + EntityDoesNotExistError: If the plugin is not found and `error_if_not_found` is True. """ - log: BoundLogger = kwargs.get("log", LOGGER.new()) - latest_plugins_stmt = ( select(models.Plugin) .join(models.Resource) @@ -520,8 +516,9 @@ def get( plugin_ids_missing = set(plugin_ids) - set( plugin.resource_id for plugin in plugins ) - log.debug("Plugin not found", plugin_ids=list(plugin_ids_missing)) - raise PluginDoesNotExistError + raise EntityDoesNotExistError( + PLUGIN_RESOURCE_TYPE, plugin_ids=list(plugin_ids_missing) + ) # extract list of plugin ids plugin_ids = [plugin.resource_id for plugin in plugins] @@ -609,7 +606,7 @@ def get( The plugin object if found, otherwise None. Raises: - PluginDoesNotExistError: If the plugin is not found and `error_if_not_found` + EntityDoesNotExistError: If the plugin is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -630,8 +627,9 @@ def get( if plugin is None: if error_if_not_found: - log.debug("Plugin not found", name=name) - raise PluginDoesNotExistError + raise EntityDoesNotExistError( + PLUGIN_RESOURCE_TYPE, name=name, group_id=group_id + ) return plugin @@ -654,7 +652,7 @@ def get( The plugin file object if found, otherwise None. Raises: - PluginFileDoesNotExistError: If the plugin is not found and + EntityDoesNotExistError: If the plugin is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -679,8 +677,9 @@ def get( if plugin_file is None: if error_if_not_found: - log.debug("Plugin file not found", filename=filename) - raise PluginFileDoesNotExistError + raise EntityDoesNotExistError( + PLUGIN_FILE_RESOURCE_TYPE, plugin_id=plugin_id, filename=filename + ) return None @@ -735,7 +734,7 @@ def create( The newly created plugin file object. Raises: - PluginFileAlreadyExistsError: If a plugin file with the given filename + EntityExistsError: If a plugin file with the given filename already exists. """ @@ -749,14 +748,16 @@ def create( ) # Validate that the proposed filename hasn't already been used in the plugin. 
- if ( - self._plugin_file_name_service.get(filename, plugin_id=plugin_id, log=log) - is not None - ): - log.debug( - "Plugin filename already exists", filename=filename, plugin_id=plugin_id + duplicate = self._plugin_file_name_service.get( + filename, plugin_id=plugin_id, log=log + ) + if duplicate is not None: + raise EntityExistsError( + PLUGIN_FILE_RESOURCE_TYPE, + duplicate.resource_id, + filename=filename, + plugin_id=plugin_id, ) - raise PluginFileAlreadyExistsError # The owner of the PluginFile resource must match the owner of the Plugin # resource. @@ -850,8 +851,7 @@ def get( plugin = db.session.scalar(latest_plugin_stmt) if plugin is None: - log.debug("Plugin not found", plugin_id=plugin_id) - raise PluginDoesNotExistError + raise EntityDoesNotExistError(PLUGIN_RESOURCE_TYPE, plugin_id=plugin_id) latest_plugin_files_count_stmt = ( select(func.count(models.PluginFile.resource_id)) @@ -899,11 +899,9 @@ def get( sort_column = sort_column.asc() latest_plugin_files_stmt = latest_plugin_files_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in PLUGIN_FILE_SORTABLE_FIELDS: - log.debug( - f"sort_by_string: '{sort_by_string}' " - f"is not in PLUGIN_FILE_SORTABLE_FIELDS" + raise SortParameterValidationError( + PLUGIN_FILE_RESOURCE_TYPE, sort_by_string ) - raise PluginSortError plugin_files_dict: dict[int, utils.PluginFileDict] = { plugin_file.resource_id: utils.PluginFileDict( @@ -949,7 +947,7 @@ def delete(self, plugin_id: int, **kwargs) -> dict[str, Any]: plugin_resource = db.session.scalar(stmt) if plugin_resource is None: - raise PluginDoesNotExistError + raise EntityDoesNotExistError(PLUGIN_RESOURCE_TYPE, plugin_id=plugin_id) latest_plugin_files_stmt = ( select(models.PluginFile) @@ -1010,7 +1008,7 @@ def get( The plugin object if found, otherwise None. Raises: - PluginDoesNotExistError: If the plugin or plugin file is not found and + EntityDoesNotExistError: If the plugin or plugin file is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -1032,8 +1030,7 @@ def get( if plugin is None: if error_if_not_found: - log.debug("Plugin not found", plugin_id=plugin_id) - raise PluginDoesNotExistError + raise EntityDoesNotExistError(PLUGIN_RESOURCE_TYPE, plugin_id=plugin_id) return None @@ -1052,8 +1049,11 @@ def get( if plugin_file is None: if error_if_not_found: - log.debug("Plugin file not found", plugin_file_id=plugin_file_id) - raise PluginFileDoesNotExistError + raise EntityDoesNotExistError( + PLUGIN_FILE_RESOURCE_TYPE, + plugin_id=plugin_id, + plugin_file_id=plugin_file_id, + ) return None @@ -1102,7 +1102,7 @@ def modify( The updated plugin file object. Raises: - PluginDoesNotExistError: If the plugin or plugin file is not found and + EntityDoesNotExistError: If the plugin or plugin file is not found and `error_if_not_found` is True. 
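Further down in plugins/service.py, _construct_plugin_task and _add_plugin_tasks trade their set-size comparisons for find_non_unique, imported above from dioptra.restapi.utils, so QueryParameterNotUniqueError can report exactly the colliding names rather than the full parameter list. A sketch consistent with those call sites; the real implementation is not in this excerpt:

from collections import Counter
from typing import Any


def find_non_unique(key: str, dicts: list[dict[str, Any]]) -> list[Any]:
    """Return the values of `key` that occur more than once in `dicts`.

    Sketch of the helper from dioptra.restapi.utils, inferred from calls
    like find_non_unique("name", task["input_params"]).
    """
    counts = Counter(d[key] for d in dicts)
    return [value for value, n in counts.items() if n > 1]

For example, find_non_unique("name", [{"name": "a"}, {"name": "a"}, {"name": "b"}]) would return ["a"].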
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -1120,17 +1120,17 @@ def modify( plugin = plugin_file_dict["plugin"] plugin_file = plugin_file_dict["plugin_file"] - if ( - filename != plugin_file.filename - and self._plugin_file_name_service.get( + if filename != plugin_file.filename: + duplicate = self._plugin_file_name_service.get( filename, plugin_id=plugin_id, log=log ) - is not None - ): - log.debug( - "Plugin filename already exists", filename=filename, plugin_id=plugin_id - ) - raise PluginFileAlreadyExistsError + if duplicate is not None: + raise EntityExistsError( + PLUGIN_FILE_RESOURCE_TYPE, + duplicate.resource_id, + filename=filename, + plugin_id=plugin_id, + ) updated_plugin_file = models.PluginFile( filename=filename, @@ -1173,7 +1173,7 @@ def delete(self, plugin_id: int, plugin_file_id: int, **kwargs) -> dict[str, Any plugin_resource = db.session.scalar(stmt) if plugin_resource is None: - raise PluginDoesNotExistError + raise EntityDoesNotExistError(PLUGIN_RESOURCE_TYPE, plugin_id=plugin_id) plugin_file_stmt = ( select(models.PluginFile) @@ -1189,7 +1189,11 @@ def delete(self, plugin_id: int, plugin_file_id: int, **kwargs) -> dict[str, Any plugin_file = db.session.scalar(plugin_file_stmt) if plugin_file is None: - raise PluginFileDoesNotExistError + raise EntityDoesNotExistError( + PLUGIN_FILE_RESOURCE_TYPE, + plugin_id=plugin_id, + plugin_file_id=plugin_file_id, + ) plugin_file_id_to_return = plugin_file.resource_id # to return to user db.session.add( @@ -1214,27 +1218,21 @@ def _construct_plugin_task( parameter_types_id_to_orm: dict[int, models.PluginTaskParameterType], log: BoundLogger, ) -> models.PluginTask: - input_param_names = [x["name"] for x in task["input_params"]] - unique_input_param_names = set(input_param_names) - - if len(unique_input_param_names) != len(input_param_names): - log.error( - "One or more input parameters have the same name", + duplicates = find_non_unique("name", task["input_params"]) + if len(duplicates) > 0: + raise QueryParameterNotUniqueError( + "plugin task input parameter", plugin_task_name=task["name"], - input_param_names=input_param_names, + input_param_names=duplicates, ) - raise PluginTaskInputParameterNameAlreadyExistsError - - output_param_names = [x["name"] for x in task["output_params"]] - unique_output_param_names = set(output_param_names) - if len(unique_output_param_names) != len(output_param_names): - log.error( - "One or more output parameters have the same name", + duplicates = find_non_unique("name", task["output_params"]) + if len(duplicates) > 0: + raise QueryParameterNotUniqueError( + "plugin task output parameter", plugin_task_name=task["name"], - output_param_names=output_param_names, + output_param_names=duplicates, ) - raise PluginTaskOutputParameterNameAlreadyExistsError input_parameters_list = [] for parameter_number, input_param in enumerate(task["input_params"]): @@ -1306,13 +1304,12 @@ def _get_referenced_parameter_types( if not len(parameter_types) == len(parameter_type_ids): returned_parameter_type_ids = set([x.resource_id for x in parameter_types]) ids_not_found = parameter_type_ids - returned_parameter_type_ids - log.error( - "One or more referenced plugin task parameter types were not found", + raise EntityDoesNotExistError( + "plugin task parameter types", num_expected=len(parameter_type_ids), num_found=len(parameter_types), ids_not_found=sorted(list(ids_not_found)), ) - raise PluginTaskParameterTypeNotFoundError return {x.resource_id: x for x in parameter_types} @@ -1323,12 +1320,9 @@ def 
_add_plugin_tasks( if not tasks: return None - task_names = [x["name"] for x in tasks] - unique_task_names = set(task_names) - - if len(unique_task_names) != len(tasks): - log.error("One or more tasks have the same name", task_names=task_names) - raise PluginTaskNameAlreadyExistsError + duplicates = find_non_unique("name", tasks) + if len(duplicates) > 0: + raise QueryParameterNotUniqueError("plugin task", task_names=duplicates) parameter_types_id_to_orm = _get_referenced_parameter_types(tasks, log=log) or {} for task in tasks: diff --git a/src/dioptra/restapi/v1/queues/__init__.py b/src/dioptra/restapi/v1/queues/__init__.py index f2c5735af..c68b10594 100644 --- a/src/dioptra/restapi/v1/queues/__init__.py +++ b/src/dioptra/restapi/v1/queues/__init__.py @@ -14,7 +14,6 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from . import errors from .controller import api -__all__ = ["api", "errors"] +__all__ = ["api"] diff --git a/src/dioptra/restapi/v1/queues/errors.py b/src/dioptra/restapi/v1/queues/errors.py deleted file mode 100644 index 12d4a3a41..000000000 --- a/src/dioptra/restapi/v1/queues/errors.py +++ /dev/null @@ -1,63 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the queue endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class QueueAlreadyExistsError(Exception): - """The queue name already exists.""" - - -class QueueDoesNotExistError(Exception): - """The requested queue does not exist.""" - - -class QueueLockedError(Exception): - """The requested queue is locked.""" - - -class QueueSortError(Exception): - """The requested sortBy column is not a sortable field.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(QueueDoesNotExistError) - def handle_queue_does_not_exist_error(error): - return {"message": "Not Found - The requested queue does not exist"}, 404 - - @api.errorhandler(QueueLockedError) - def handle_queue_locked_error(error): - return {"message": "Forbidden - The requested queue is locked."}, 403 - - @api.errorhandler(QueueAlreadyExistsError) - def handle_queue_already_exists_error(error): - return ( - { - "message": "Bad Request - The queue name on the registration form " - "already exists. Please select another and resubmit." 
- }, - 400, - ) - - @api.errorhandler(QueueSortError) - def handle_queue_sort_error(error): - return ( - {"message": "Bad Request - This column can not be sorted."}, - 400, - ) diff --git a/src/dioptra/restapi/v1/queues/service.py b/src/dioptra/restapi/v1/queues/service.py index ad21273a6..a9e552637 100644 --- a/src/dioptra/restapi/v1/queues/service.py +++ b/src/dioptra/restapi/v1/queues/service.py @@ -27,13 +27,16 @@ from dioptra.restapi.db import db, models from dioptra.restapi.db.models.constants import resource_lock_types -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + SortParameterValidationError, +) from dioptra.restapi.v1 import utils from dioptra.restapi.v1.groups.service import GroupIdService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import QueueAlreadyExistsError, QueueDoesNotExistError, QueueSortError - LOGGER: BoundLogger = structlog.stdlib.get_logger() RESOURCE_TYPE: Final[str] = "queue" @@ -91,18 +94,20 @@ def create( The newly created queue object. Raises: - QueueAlreadyExistsError: If a queue with the given name already exists. - GroupDoesNotExistError: If the group with the provided ID does not exist. + EntityExistsError: If a queue with the given name already exists. + EntityDoesNotExistError: If the group with the provided ID does not exist. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) - if self._queue_name_service.get(name, group_id=group_id, log=log) is not None: - log.debug("Queue name already exists", name=name, group_id=group_id) - raise QueueAlreadyExistsError + duplicate = self._queue_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id + ) group = self._group_id_service.get(group_id, error_if_not_found=True) - resource = models.Resource(resource_type="queue", owner=group) + resource = models.Resource(resource_type=RESOURCE_TYPE, owner=group) new_queue = models.Queue( name=name, description=description, resource=resource, creator=current_user ) @@ -202,8 +207,7 @@ def get( sort_column = sort_column.asc() queues_stmt = queues_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in SORTABLE_FIELDS: - log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS") - raise QueueSortError + raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string) queues = list(db.session.scalars(queues_stmt).all()) @@ -257,7 +261,7 @@ def get( The queue object if found, otherwise None. Raises: - QueueDoesNotExistError: If the queue is not found and `error_if_not_found` + EntityDoesNotExistError: If the queue is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -276,8 +280,7 @@ def get( if queue is None: if error_if_not_found: - log.debug("Queue not found", queue_id=queue_id) - raise QueueDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, queue_id=queue_id) return None @@ -318,9 +321,9 @@ def modify( The updated queue object. Raises: - QueueDoesNotExistError: If the queue is not found and `error_if_not_found` + EntityDoesNotExistError: If the queue is not found and `error_if_not_found` is True. - QueueAlreadyExistsError: If the queue name already exists. + EntityExistsError: If the queue name already exists. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -331,13 +334,12 @@ def modify( queue = queue_dict["queue"] group_id = queue.resource.group_id - if ( - name != queue.name - and self._queue_name_service.get(name, group_id=group_id, log=log) - is not None - ): - log.debug("Queue name already exists", name=name, group_id=group_id) - raise QueueAlreadyExistsError + if name != queue.name: + duplicate = self._queue_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.resource_id, name=name, group_id=group_id + ) new_queue = models.Queue( name=name, @@ -368,7 +370,7 @@ def delete(self, queue_id: int, **kwargs) -> dict[str, Any]: A dictionary reporting the status of the request. Raises: - QueueDoesNotExistError: If the queue is not found. + EntityDoesNotExistError: If the queue is not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -378,7 +380,7 @@ def delete(self, queue_id: int, **kwargs) -> dict[str, Any]: queue_resource = db.session.scalars(stmt).first() if queue_resource is None: - raise QueueDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, queue_id=queue_id) deleted_resource_lock = models.ResourceLock( resource_lock_type=resource_lock_types.DELETE, @@ -411,7 +413,7 @@ def get( The queue object if found, otherwise None. Raises: - QueueDoesNotExistError: If the queue is not found and `error_if_not_found` + EntityDoesNotExistError: If the queue is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -432,8 +434,9 @@ def get( queue_ids_missing = set(queue_ids) - set( queue.resource_id for queue in queues ) - log.debug("Queue not found", queue_ids=list(queue_ids_missing)) - raise QueueDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, queue_ids=list(queue_ids_missing) + ) return queues @@ -460,7 +463,7 @@ def get( The queue object if found, otherwise None. Raises: - QueueDoesNotExistError: If the queue is not found and `error_if_not_found` + EntityDoesNotExistError: If the queue is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -480,8 +483,7 @@ def get( if queue is None: if error_if_not_found: - log.debug("Queue not found", name=name) - raise QueueDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, name=name) return None diff --git a/src/dioptra/restapi/v1/shared/drafts/service.py b/src/dioptra/restapi/v1/shared/drafts/service.py index 94e22115a..099d11234 100644 --- a/src/dioptra/restapi/v1/shared/drafts/service.py +++ b/src/dioptra/restapi/v1/shared/drafts/service.py @@ -31,9 +31,8 @@ BackendDatabaseError, DraftAlreadyExistsError, DraftDoesNotExistError, - ResourceDoesNotExistError, + EntityDoesNotExistError, ) -from dioptra.restapi.v1.groups.errors import GroupDoesNotExistError from dioptra.restapi.v1.groups.service import GroupIdService LOGGER: BoundLogger = structlog.stdlib.get_logger() @@ -159,7 +158,7 @@ def create( The newly created draft object. Raises: - GroupDoesNotExistError: If the group with the provided ID does not exist. + EntityDoesNotExistError: If the group with the provided ID does not exist. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -173,7 +172,9 @@ def create( ) resource = db.session.scalar(stmt) if resource is None: - raise GroupDoesNotExistError + raise EntityDoesNotExistError( + self._resource_type, resource_id=base_resource_id + ) group = resource.owner draft_payload = { @@ -249,8 +250,7 @@ def get( if draft is None: if error_if_not_found: - log.debug("Draft not found", draft_resource_id=draft_id) - raise DraftDoesNotExistError + raise DraftDoesNotExistError(draft_resource_id=draft_id) return None @@ -371,8 +371,7 @@ def get( if draft is None: if error_if_not_found: - log.debug("Draft not found", resource_id=resource_id) - raise DraftDoesNotExistError + raise DraftDoesNotExistError(resource_id=resource_id) return None, num_other_drafts @@ -407,11 +406,11 @@ def create( resource = db.session.scalars(stmt).first() if resource is None: - raise ResourceDoesNotExistError + raise EntityDoesNotExistError(self._resource_type, resource_id=resource_id) existing_draft, num_other_drafts = self.get(resource_id, log=log) if existing_draft: - raise DraftAlreadyExistsError + raise DraftAlreadyExistsError(self._resource_type, resource_id) draft_payload = { "resource_data": payload, diff --git a/src/dioptra/restapi/v1/shared/snapshots/service.py b/src/dioptra/restapi/v1/shared/snapshots/service.py index 5301a0033..e2769c393 100644 --- a/src/dioptra/restapi/v1/shared/snapshots/service.py +++ b/src/dioptra/restapi/v1/shared/snapshots/service.py @@ -25,7 +25,7 @@ from structlog.stdlib import BoundLogger from dioptra.restapi.db import db, models -from dioptra.restapi.errors import BackendDatabaseError, ResourceDoesNotExistError +from dioptra.restapi.errors import BackendDatabaseError, EntityDoesNotExistError from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters LOGGER: BoundLogger = structlog.stdlib.get_logger() @@ -74,7 +74,7 @@ def get( None. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -87,8 +87,9 @@ def get( if resource is None: if error_if_not_found: - log.debug("Resource not found", resource_id=resource_id) - raise ResourceDoesNotExistError + raise EntityDoesNotExistError( + self._resource_type, resource_id=resource_id + ) return None @@ -176,7 +177,7 @@ def get( The requested snapshot the resource object if found, otherwise None. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -189,8 +190,9 @@ def get( if resource is None: if error_if_not_found: - log.debug("Resource not found", resource_id=resource_id) - raise ResourceDoesNotExistError + raise EntityDoesNotExistError( + self._resource_type, resource_id=resource_id + ) return None @@ -207,8 +209,9 @@ def get( if snapshot is None: if error_if_not_found: - log.debug("Resource snapshot not found", snapshot_id=snapshot_id) - raise ResourceDoesNotExistError + raise EntityDoesNotExistError( + self._resource_type + "_snapshot", snapshot_id=snapshot_id + ) return None diff --git a/src/dioptra/restapi/v1/shared/tags/service.py b/src/dioptra/restapi/v1/shared/tags/service.py index 1cb0e4569..c3c181e44 100644 --- a/src/dioptra/restapi/v1/shared/tags/service.py +++ b/src/dioptra/restapi/v1/shared/tags/service.py @@ -25,8 +25,8 @@ from structlog.stdlib import BoundLogger from dioptra.restapi.db import db, models -from dioptra.restapi.errors import ResourceDoesNotExistError -from dioptra.restapi.v1.tags.errors import TagDoesNotExistError +from dioptra.restapi.errors import EntityDoesNotExistError +from dioptra.restapi.v1.tags.service import RESOURCE_TYPE as TAG_RESOURCE_TYPE from dioptra.restapi.v1.tags.service import TagIdService LOGGER: BoundLogger = structlog.stdlib.get_logger() @@ -69,7 +69,7 @@ def get( The list of tags if the resource is found, otherwise None. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -100,7 +100,7 @@ def append( The updated tag resource object. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. TagDoesNotExistError: If one or more tags are not found. """ @@ -147,9 +147,9 @@ def modify( The updated tag resource object. Raises: - ResourceDoesNotExistError: If the resource is not found and + EntityDoesNotExistError: If the resource is not found and `error_if_not_found` is True. - TagDoesNotExistError: If one or more tags are not found. + EntityDoesNotExistError: If one or more tags are not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -232,7 +232,9 @@ def delete(self, resource_id: int, tag_id, **kwargs) -> dict[str, Any]: current_tags = resource.tags tag_exists = tag_id in {tag.tag_id for tag in current_tags} if not tag_exists: - raise TagDoesNotExistError + raise EntityDoesNotExistError( + TAG_RESOURCE_TYPE, resource_id=resource_id, tag_id=tag_id + ) resource.tags = [tag for tag in current_tags if tag.tag_id != tag_id] @@ -259,7 +261,6 @@ def get(self, resource_id: int, **kwargs) -> models.Resource: resource = db.session.scalar(stmt) if resource is None: - log.debug(f"{self._resource_type} not found", resource_id=resource_id) - raise ResourceDoesNotExistError + raise EntityDoesNotExistError(self._resource_type, resource_id=resource_id) return resource diff --git a/src/dioptra/restapi/v1/tags/__init__.py b/src/dioptra/restapi/v1/tags/__init__.py index 11ce655e6..ab0a41a34 100644 --- a/src/dioptra/restapi/v1/tags/__init__.py +++ b/src/dioptra/restapi/v1/tags/__init__.py @@ -14,6 +14,3 @@ # # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode -from . 
import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/tags/service.py b/src/dioptra/restapi/v1/tags/service.py index 663b5f013..a81a905c9 100644 --- a/src/dioptra/restapi/v1/tags/service.py +++ b/src/dioptra/restapi/v1/tags/service.py @@ -27,14 +27,18 @@ from structlog.stdlib import BoundLogger from dioptra.restapi.db import db, models -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + SortParameterValidationError, +) from dioptra.restapi.v1.groups.service import GroupIdService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import TagAlreadyExistsError, TagDoesNotExistError, TagSortError - LOGGER: BoundLogger = structlog.stdlib.get_logger() +RESOURCE_TYPE: Final[str] = "tag" SEARCHABLE_FIELDS: Final[dict[str, Any]] = { "name": lambda x: models.Tag.name.like(x), } @@ -85,14 +89,16 @@ def create( The newly created tag object. Raises: - TagAlreadyExistsError: If a tag with the given name already exists. - GroupDoesNotExistError: If the group with the provided ID does not exist. + EntityExistsError: If a tag with the given name already exists. + EntityDoesNotExistError: If the group with the provided ID does not exist. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) - if self._tag_name_service.get(name, group_id=group_id, log=log) is not None: - log.debug("Tag name already exists", name=name, group_id=group_id) - raise TagAlreadyExistsError + duplicate = self._tag_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.tag_id, name=name, group_id=group_id + ) group = self._group_id_service.get(group_id, error_if_not_found=True) @@ -172,8 +178,7 @@ def get( sort_column = sort_column.asc() tags_stmt = tags_stmt.order_by(sort_column) elif sort_by_string and sort_by_string not in SORTABLE_FIELDS: - log.debug(f"sort_by_string: '{sort_by_string}' is not in SORTABLE_FIELDS") - raise TagSortError + raise SortParameterValidationError(RESOURCE_TYPE, sort_by_string) tags = list(db.session.scalars(tags_stmt).all()) @@ -214,7 +219,7 @@ def get( The tag object if found, otherwise None. Raises: - TagDoesNotExistError: If the tag is not found and `error_if_not_found` + EntityDoesNotExistError: If the tag is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -225,8 +230,7 @@ def get( if tag is None: if error_if_not_found: - log.debug("Tag not found", tag_id=tag_id) - raise TagDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, tag_id=tag_id) return None @@ -254,9 +258,9 @@ def modify( The updated tag object. Raises: - TagDoesNotExistError: If the tag is not found and `error_if_not_found` + EntityDoesNotExistError: If the tag is not found and `error_if_not_found` is True. - TagAlreadyExistsError: If the tag name already exists. + EntityExistsError: If the tag name already exists. 
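Note that the tag service passes duplicate.tag_id (above in create(), and again in modify() below) where every other service passes duplicate.resource_id: tags appear to be plain rows rather than versioned resources in this schema (delete() below removes the row directly instead of taking a resource lock), so the primary key stands in for the resource id. The same idiom as a hypothetical helper:

from dioptra.restapi.errors import EntityExistsError


def ensure_tag_name_available(tag_name_service, name, group_id, log):
    # Hypothetical helper mirroring TagService.create/modify; the
    # duplicate is identified by tag_id rather than resource_id.
    duplicate = tag_name_service.get(name, group_id=group_id, log=log)
    if duplicate is not None:
        raise EntityExistsError(
            "tag", duplicate.tag_id, name=name, group_id=group_id
        )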
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -266,12 +270,12 @@ def modify( return None group_id = tag.group_id - if ( - name != tag.name - and self._tag_name_service.get(name, group_id=group_id, log=log) is not None - ): - log.debug("Tag name already exists", name=name, group_id=group_id) - raise TagAlreadyExistsError + if name != tag.name: + duplicate = self._tag_name_service.get(name, group_id=group_id, log=log) + if duplicate is not None: + raise EntityExistsError( + RESOURCE_TYPE, duplicate.tag_id, name=name, group_id=group_id + ) current_timestamp = datetime.datetime.now(tz=datetime.timezone.utc) tag.name = name @@ -301,7 +305,7 @@ def delete( The tag object if found, otherwise None. Raises: - TagDoesNotExistError: If the tag is not found. + EntityDoesNotExistError: If the tag is not found. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) log.debug("Get tag by id", tag_id=tag_id) @@ -310,8 +314,7 @@ def delete( tag = db.session.scalar(stmt) if tag is None: - log.debug("Tag not found", tag_id=tag_id) - raise TagDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, tag_id=tag_id) tag_id = tag.tag_id db.session.delete(tag) @@ -360,7 +363,7 @@ def get( The tag object if found, otherwise None. Raises: - TagDoesNotExistError: If the tag is not found and `error_if_not_found` + EntityDoesNotExistError: If the tag is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -371,8 +374,7 @@ def get( if tag is None: if error_if_not_found: - log.debug("Tag not found", tag_id=tag_id) - raise TagDoesNotExistError + raise EntityDoesNotExistError(RESOURCE_TYPE, tag_id=tag_id) return None @@ -430,7 +432,7 @@ def get( The tag object if found, otherwise None. Raises: - TagDoesNotExistError: If the tag is not found and `error_if_not_found` + EntityDoesNotExistError: If the tag is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -444,8 +446,9 @@ def get( if tag is None: if error_if_not_found: - log.debug("Tag not found", name=name) - raise TagDoesNotExistError + raise EntityDoesNotExistError( + RESOURCE_TYPE, name=name, group_id=group_id + ) return None diff --git a/src/dioptra/restapi/v1/users/__init__.py b/src/dioptra/restapi/v1/users/__init__.py index 0491fe75b..ff72e5b3b 100644 --- a/src/dioptra/restapi/v1/users/__init__.py +++ b/src/dioptra/restapi/v1/users/__init__.py @@ -15,6 +15,3 @@ # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode """The users endpoint subpackage.""" -from . import errors - -__all__ = ["errors"] diff --git a/src/dioptra/restapi/v1/users/errors.py b/src/dioptra/restapi/v1/users/errors.py deleted file mode 100644 index 12ab460f9..000000000 --- a/src/dioptra/restapi/v1/users/errors.py +++ /dev/null @@ -1,115 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. 
However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the user endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class NoCurrentUserError(Exception): - """There is no currently logged-in user.""" - - -class UserPasswordChangeError(Exception): - """Password change failed.""" - - -class UserPasswordChangeSamePasswordError(Exception): - """Password change failed.""" - - -class UserPasswordExpiredError(Exception): - """Password expired.""" - - -class UserPasswordVerificationError(Exception): - """Password verification failed.""" - - -class UsernameNotAvailableError(Exception): - """The username is not available.""" - - -class UserEmailNotAvailableError(Exception): - """The email address is not available.""" - - -class UserDoesNotExistError(Exception): - """The requested user does not exist.""" - - -class UserRegistrationError(Exception): - """The user registration form contains invalid parameters.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(NoCurrentUserError) - def handle_no_current_user_error(error): - return {"message": "There is no currently logged-in user"}, 401 - - @api.errorhandler(UserPasswordChangeError) - def handle_user_password_change_error_error(error): - return {"message": "Password Change Failed"}, 403 - - @api.errorhandler(UserPasswordChangeSamePasswordError) - def handle_user_password_change_same_error_error(error): - return { - "message": "Password Change Failed - The provided password matches" - "the existing password. Please provide a different password." - }, 403 - - @api.errorhandler(UserPasswordExpiredError) - def handle_user_password_expired_error(error): - return {"message": "Password expired."}, 401 - - @api.errorhandler(UserPasswordVerificationError) - def handle_user_password_verification_error_error(error): - return {"message": "Password Verification Failed"}, 403 - - @api.errorhandler(UserDoesNotExistError) - def handle_user_does_not_exist_error(error): - return {"message": "Not Found - The requested user does not exist"}, 404 - - @api.errorhandler(UsernameNotAvailableError) - def handle_username_not_available_error(error): - return ( - { - "message": "Bad Request - The username on the registration form " - "is not available. Please select another and resubmit." - }, - 400, - ) - - @api.errorhandler(UserEmailNotAvailableError) - def handle_email_not_available_error(error): - return ( - { - "message": "Bad Request - The email on the registration form " - "is not available. Please select another and resubmit." - }, - 400, - ) - - @api.errorhandler(UserRegistrationError) - def handle_user_registration_error(error): - return ( - { - "message": "Bad Request - The user registration form contains " - "invalid parameters. Please verify and resubmit." 
- }, - 400, - ) diff --git a/src/dioptra/restapi/v1/users/service.py b/src/dioptra/restapi/v1/users/service.py index eacda9a91..7f425059c 100644 --- a/src/dioptra/restapi/v1/users/service.py +++ b/src/dioptra/restapi/v1/users/service.py @@ -29,7 +29,15 @@ from dioptra.restapi.db import db, models from dioptra.restapi.db.models.constants import user_lock_types -from dioptra.restapi.errors import BackendDatabaseError +from dioptra.restapi.errors import ( + BackendDatabaseError, + EntityDoesNotExistError, + EntityExistsError, + NoCurrentUserError, + QueryParameterValidationError, + UserPasswordChangeError, + UserPasswordError, +) from dioptra.restapi.v1.groups.service import GroupMemberService, GroupNameService from dioptra.restapi.v1.plugin_parameter_types.service import ( BuiltinPluginParameterTypeService, @@ -37,18 +45,6 @@ from dioptra.restapi.v1.shared.password_service import PasswordService from dioptra.restapi.v1.shared.search_parser import construct_sql_query_filters -from .errors import ( - NoCurrentUserError, - UserDoesNotExistError, - UserEmailNotAvailableError, - UsernameNotAvailableError, - UserPasswordChangeError, - UserPasswordChangeSamePasswordError, - UserPasswordExpiredError, - UserPasswordVerificationError, - UserRegistrationError, -) - LOGGER: BoundLogger = structlog.stdlib.get_logger() DEFAULT_GROUP_NAME: Final[str] = "public" @@ -128,17 +124,19 @@ def create( log: BoundLogger = kwargs.get("log", LOGGER.new()) if password != confirm_password: - raise UserRegistrationError( - "The password and confirmation password did not match." + raise QueryParameterValidationError( + "password", "equivalence", password="***", confirmation="***" ) - if self._user_name_service.get(username, log=log) is not None: - log.debug("Username already exists", username=username) - raise UsernameNotAvailableError + duplicate = self._user_name_service.get(username, log=log) + if duplicate is not None: + raise EntityExistsError("User", duplicate.user_id, username=username) - if self._get_user_by_email(email_address, log=log) is not None: - log.debug("Email already exists", email_address=email_address) - raise UserEmailNotAvailableError + duplicate = self._get_user_by_email(email_address, log=log) + if duplicate is not None: + raise EntityExistsError( + "User", duplicate.user_id, email_address=email_address + ) hashed_password = self._user_password_service.hash(password, log=log) new_user: models.User = models.User( @@ -237,7 +235,7 @@ def _get_user_by_email( The user object if found, otherwise None. Raises: - UserDoesNotExistError: If the user is not found and `error_if_not_found` + EntityDoesNotExistError: If the user is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -250,8 +248,7 @@ def _get_user_by_email( if user is None: if error_if_not_found: - log.debug("User not found", email_address=email_address) - raise UserDoesNotExistError + raise EntityDoesNotExistError("User", email_address=email_address) return None @@ -317,7 +314,7 @@ def get( The user object if found, otherwise None. Raises: - UserDoesNotExistError: If the user is not found and `error_if_not_found` + EntityDoesNotExistError: If the user is not found and `error_if_not_found` is True. 
""" log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -328,8 +325,7 @@ def get( if user is None: if error_if_not_found: - log.debug("User not found", user_id=user_id) - raise UserDoesNotExistError + raise EntityDoesNotExistError("User", user_id=user_id) return None @@ -399,10 +395,9 @@ def get(self, **kwargs) -> models.User: Raises: NoCurrentUserError: If there is no current user. """ - log: BoundLogger = kwargs.get("log", LOGGER.new()) + log: BoundLogger = kwargs.get("log", LOGGER.new()) # noqa: F841 if not current_user.is_authenticated: - log.debug("There is no current user.") raise NoCurrentUserError return cast(models.User, current_user) @@ -527,7 +522,7 @@ def get( The user object if found, otherwise None. Raises: - UserDoesNotExistError: If the user is not found and `error_if_not_found` + EntityDoesNotExistError: If the user is not found and `error_if_not_found` is True. """ log: BoundLogger = kwargs.get("log", LOGGER.new()) @@ -538,8 +533,7 @@ def get( if user is None: if error_if_not_found: - log.debug("User not found", username=username) - raise UserDoesNotExistError + raise EntityDoesNotExistError("User", username=username) return None @@ -595,12 +589,10 @@ def authenticate( ) if not authenticated and error_if_failed: - log.debug("Password authentication failed.") - raise UserPasswordVerificationError + raise UserPasswordError("Password authentication failed.") if expiration_date < current_timestamp: - log.debug("Password expired") - raise UserPasswordExpiredError + raise UserPasswordError("Password expired.") return authenticated @@ -637,15 +629,17 @@ def change( if not self._password_service.verify( password=current_password, hashed_password=str(user.password), log=log ): - raise UserPasswordChangeError + raise UserPasswordChangeError("Invalid Current Password.") if new_password != confirm_new_password: - raise UserPasswordChangeError + raise UserPasswordChangeError( + "Confirmation password does not match new password." + ) if self._password_service.verify( password=new_password, hashed_password=str(user.password), log=log ): - raise UserPasswordChangeSamePasswordError + raise UserPasswordChangeError("New password matches old password.") timestamp = datetime.datetime.now(tz=datetime.timezone.utc) user.password = self._password_service.hash(password=new_password, log=log) diff --git a/src/dioptra/restapi/v1/workflows/errors.py b/src/dioptra/restapi/v1/workflows/errors.py deleted file mode 100644 index c5723ec4a..000000000 --- a/src/dioptra/restapi/v1/workflows/errors.py +++ /dev/null @@ -1,41 +0,0 @@ -# This Software (Dioptra) is being made available as a public service by the -# National Institute of Standards and Technology (NIST), an Agency of the United -# States Department of Commerce. This software was developed in part by employees of -# NIST and in part by NIST contractors. Copyright in portions of this software that -# were developed by NIST contractors has been licensed or assigned to NIST. Pursuant -# to Title 17 United States Code Section 105, works of NIST employees are not -# subject to copyright protection in the United States. However, NIST may hold -# international copyright in software created by its employees and domestic -# copyright (or licensing rights) in portions of software that were assigned or -# licensed to NIST. To the extent that NIST holds copyright in this software, it is -# being made available under the Creative Commons Attribution 4.0 International -# license (CC BY 4.0). 
The disclaimers of the CC BY 4.0 license apply to all parts -# of the software developed or licensed by NIST. -# -# ACCESS THE FULL CC BY 4.0 LICENSE HERE: -# https://creativecommons.org/licenses/by/4.0/legalcode -"""Error handlers for the workflows endpoints.""" -from __future__ import annotations - -from flask_restx import Api - - -class JobEntryPointDoesNotExistError(Exception): - """The job's entry point does not exist.""" - - -class JobExperimentDoesNotExistError(Exception): - """The experiment associated with the job does not exist.""" - - -def register_error_handlers(api: Api) -> None: - @api.errorhandler(JobEntryPointDoesNotExistError) - def handle_experiment_job_does_not_exist_error(error): - return {"message": "Not Found - The job's entry point does not exist"}, 404 - - @api.errorhandler(JobExperimentDoesNotExistError) - def handle_experiment_does_not_exist_error(error): - return { - "message": "Not Found - The experiment associated with the job does not " - "exist" - }, 404 diff --git a/src/dioptra/restapi/v1/workflows/lib/run_dioptra_job.py.tmpl b/src/dioptra/restapi/v1/workflows/lib/run_dioptra_job.py.tmpl index 7abca7ce8..69c6ff0f8 100644 --- a/src/dioptra/restapi/v1/workflows/lib/run_dioptra_job.py.tmpl +++ b/src/dioptra/restapi/v1/workflows/lib/run_dioptra_job.py.tmpl @@ -15,17 +15,16 @@ # ACCESS THE FULL CC BY 4.0 LICENSE HERE: # https://creativecommons.org/licenses/by/4.0/legalcode import json -import uuid from pathlib import Path -from typing import Any, Mapping, MutableMapping, Protocol, cast +from typing import Any, Mapping, MutableMapping, cast import mlflow import structlog import yaml from structlog.stdlib import BoundLogger -from dioptra.sdk.utilities.contexts import env_vars -from dioptra.sdk.utilities.contexts import sys_path_dirs +from dioptra.client import DioptraClient +from dioptra.sdk.utilities.contexts import env_vars, sys_path_dirs from dioptra.task_engine.issues import IssueSeverity from dioptra.task_engine.task_engine import run_experiment from dioptra.task_engine.validation import validate @@ -40,17 +39,10 @@ JOB_YAML_PATH = "${task_engine_yaml_path}" JOB_PARAMS_JSON_PATH = "${job_params_json_path}" -class SimpleDioptraClient(Protocol): - def set_job_mlflow_run_id( - self, job_id: int, mlflow_run_id: str | uuid.UUID - ) -> None: - ... 
# fmt: skip - - - def main( plugins_dir: str | Path = "plugins", enable_mlflow_tracking: bool = False, - dioptra_client: SimpleDioptraClient | None = None, + dioptra_client: DioptraClient[dict[str, Any]] | None = None, logger: BoundLogger | None = None, ) -> None: log = logger or LOGGER.new(job_id=JOB_ID, experiment_id=EXPERIMENT_ID) # noqa: F841 @@ -123,7 +115,7 @@ def _run_job( def _run_mlflow_tracked_job( - dioptra_client: SimpleDioptraClient, + dioptra_client: DioptraClient[dict[str, Any]], plugins_dir: Path, job_yaml: Mapping[str, Any], job_parameters: MutableMapping[str, Any], @@ -144,7 +136,7 @@ def _run_mlflow_tracked_job( active_run = mlflow.start_run() try: - dioptra_client.set_job_mlflow_run_id( + dioptra_client.jobs.set_mlflow_run_id( job_id=JOB_ID, mlflow_run_id=active_run.info.run_id ) mlflow.set_tag(DIOPTRA_JOB_ID, JOB_ID) diff --git a/src/dioptra/restapi/v1/workflows/lib/views.py b/src/dioptra/restapi/v1/workflows/lib/views.py index 57f273ade..d4bf3340d 100644 --- a/src/dioptra/restapi/v1/workflows/lib/views.py +++ b/src/dioptra/restapi/v1/workflows/lib/views.py @@ -19,8 +19,13 @@ from structlog.stdlib import BoundLogger from dioptra.restapi.db import db, models - -from ..errors import JobEntryPointDoesNotExistError +from dioptra.restapi.errors import EntityDoesNotExistError +from dioptra.restapi.v1.entrypoints.service import ( + RESOURCE_TYPE as ENTRYPOINT_RESOURCE_TYPE, +) +from dioptra.restapi.v1.experiments.service import ( + RESOURCE_TYPE as EXPERIMENT_RESOURCE_TYPE, +) LOGGER: BoundLogger = structlog.stdlib.get_logger() @@ -48,11 +53,7 @@ def get_entry_point( entry_point = db.session.scalar(entry_point_stmt) if entry_point is None: - log.debug( - "The job's entrypoint does not exist", - job_id=job_id, - ) - raise JobEntryPointDoesNotExistError + raise EntityDoesNotExistError(ENTRYPOINT_RESOURCE_TYPE, job_id=job_id) return entry_point @@ -78,11 +79,7 @@ def get_experiment(job_id: int, logger: BoundLogger | None = None) -> models.Exp experiment = db.session.scalar(experiment_stmt) if experiment is None: - log.debug( - "The experiment associated with the job does not exist", - job_id=job_id, - ) - raise JobEntryPointDoesNotExistError + raise EntityDoesNotExistError(EXPERIMENT_RESOURCE_TYPE, job_id=job_id) return experiment diff --git a/src/dioptra/restapi/v1/workflows/service.py b/src/dioptra/restapi/v1/workflows/service.py index 77be2e77e..d5769e274 100644 --- a/src/dioptra/restapi/v1/workflows/service.py +++ b/src/dioptra/restapi/v1/workflows/service.py @@ -20,7 +20,8 @@ import structlog from structlog.stdlib import BoundLogger -from .lib import package_job_files, views +from .lib import views +from .lib.package_job_files import package_job_files from .schema import FileTypes LOGGER: BoundLogger = structlog.stdlib.get_logger() diff --git a/src/dioptra/rq/tasks/run_v1_dioptra_job.py b/src/dioptra/rq/tasks/run_v1_dioptra_job.py index 524b0d40a..38ed3d602 100644 --- a/src/dioptra/rq/tasks/run_v1_dioptra_job.py +++ b/src/dioptra/rq/tasks/run_v1_dioptra_job.py @@ -18,22 +18,13 @@ import os import tarfile import tempfile -import uuid from pathlib import Path -from typing import Final, cast -from urllib.parse import urlencode, urlparse, urlunparse +from typing import Final -import requests import structlog from structlog.stdlib import BoundLogger -from dioptra.restapi.routes import ( - V1_AUTH_ROUTE, - V1_EXPERIMENTS_ROUTE, - V1_JOBS_ROUTE, - V1_ROOT, - V1_WORKFLOWS_ROUTE, -) +from dioptra.client import connect_json_dioptra_client from dioptra.sdk.utilities.contexts import 
sys_path_dirs from dioptra.sdk.utilities.paths import set_cwd @@ -44,94 +35,6 @@ ENV_DIOPTRA_WORKER_PASSWORD: Final[str] = "DIOPTRA_WORKER_PASSWORD" ENV_MLFLOW_S3_ENDPOINT_URL: Final[str] = "MLFLOW_S3_ENDPOINT_URL" -DOWNLOAD_CHUNK_SIZE: Final[int] = 10 * 1024 -TAR_GZ_FILE_TYPE: Final[str] = "tar_gz" -TAR_GZ_EXTENSION: Final[str] = ".tar.gz" - -JOB_FILES_DOWNLOAD_ENDPOINT: Final[str] = f"{V1_WORKFLOWS_ROUTE}/jobFilesDownload" - - -class SimpleDioptraClient(object): - def __init__(self, username: str, password: str, api_url: str): - self._api_scheme, self._api_netloc = self._extract_scheme_and_netloc(api_url) - self._username = username - self._password = password - self._session: requests.Session | None = None - - @property - def session(self) -> requests.Session: - if self._session is None: - self.login() - - return cast(requests.Session, self._session) - - def login(self) -> None: - if self._session is None: - self._session = requests.Session() - - response = self._session.post( - self._build_url(f"{V1_AUTH_ROUTE}/login"), - json={"username": self._username, "password": self._password}, - ) - response.raise_for_status() - - def download_job_files(self, job_id: int, output_dir: Path) -> Path: - url = self._build_url( - JOB_FILES_DOWNLOAD_ENDPOINT, - query_params={"jobId": str(job_id), "fileType": TAR_GZ_FILE_TYPE}, - ) - job_files_path = (output_dir / "job_files").with_suffix(TAR_GZ_EXTENSION) - with ( - self.session.get(url, stream=True) as response, - job_files_path.open(mode="wb") as f, - ): - for chunk in response.iter_content(chunk_size=DOWNLOAD_CHUNK_SIZE): - f.write(chunk) - - return job_files_path - - def set_job_status(self, job_id: int, experiment_id: int, status: str) -> None: - url = self._build_url( - f"{V1_EXPERIMENTS_ROUTE}/{experiment_id}/jobs/{job_id}/status" - ) - response = self.session.put(url, json={"status": status}) - response.raise_for_status() - - def set_job_mlflow_run_id( - self, job_id: int, mlflow_run_id: str | uuid.UUID - ) -> None: - url = self._build_url(f"{V1_JOBS_ROUTE}/{job_id}/mlflowRun") - payload = { - "mlflowRunId": ( - mlflow_run_id.hex - if isinstance(mlflow_run_id, uuid.UUID) - else mlflow_run_id - ) - } - response = self.session.post(url, json=payload) - response.raise_for_status() - - def _build_url( - self, endpoint: str, query_params: dict[str, str] | None = None - ) -> str: - query_params = query_params or {} - - return urlunparse( - ( - self._api_scheme, - self._api_netloc, - f"/{V1_ROOT}/{endpoint}", - "", - urlencode(query_params), - "", - ) - ) - - @staticmethod - def _extract_scheme_and_netloc(api_url: str) -> tuple[str, str]: - parsed_api_url = urlparse(url=api_url) - return parsed_api_url.scheme, parsed_api_url.netloc - def run_v1_dioptra_job(job_id: int, experiment_id: int) -> None: # noqa: C901 """Fetches the job files from the Dioptra API and runs the job. 
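The remaining hunks port this worker onto the packaged client. A minimal sketch of the resulting call pattern, using only methods that appear in this patch (connect_json_dioptra_client, auth.login, experiments.jobs.set_status, workflows.download_job_files); the function name and the credential and output-directory values are illustrative placeholders, not part of the change:

    import tempfile
    from pathlib import Path

    from dioptra.client import connect_json_dioptra_client


    def worker_flow_sketch(
        job_id: int, experiment_id: int, username: str, password: str
    ) -> None:
        # Builds a client from the DIOPTRA_API environment variable; per the
        # hunk below, it raises ValueError when that variable is unset.
        client = connect_json_dioptra_client()
        client.auth.login(username=username, password=password)

        # Status updates move from SimpleDioptraClient.set_job_status() to
        # the nested experiments.jobs collection.
        client.experiments.jobs.set_status(
            experiment_id=experiment_id, job_id=job_id, status="started"
        )

        # Job files are still fetched as a tar.gz package, now through the
        # workflows endpoint group (same job_id/output_dir signature).
        with tempfile.TemporaryDirectory() as tmp_dir:
            job_files_package = client.workflows.download_job_files(
                job_id=job_id, output_dir=Path(tmp_dir)
            )
            print(f"Job files downloaded to {job_files_package}")

As the hunks below show, each of these calls is wrapped in try/except so that any failure flips the job status to "failed" before re-raising.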
@@ -154,19 +57,24 @@ def run_v1_dioptra_job(job_id: int, experiment_id: int) -> None: # noqa: C901 f"{ENV_DIOPTRA_WORKER_PASSWORD} environment variable is not set" ) - if (api_url := os.getenv(ENV_DIOPTRA_API)) is None: + # Instantiate a Dioptra client and login using worker's authentication details + try: + client = connect_json_dioptra_client() + + except ValueError: log.error(f"{ENV_DIOPTRA_API} environment variable is not set") - raise ValueError(f"{ENV_DIOPTRA_API} environment variable is not set") + raise ValueError(f"{ENV_DIOPTRA_API} environment variable is not set") from None - # Instantiate a Dioptra client and login using worker's authentication details - client = SimpleDioptraClient(username=username, password=password, api_url=api_url) + client.auth.login(username=username, password=password) # Set Dioptra Job status to "started" - client.set_job_status(job_id=job_id, experiment_id=experiment_id, status="started") + client.experiments.jobs.set_status( + experiment_id=experiment_id, job_id=job_id, status="started" + ) if os.getenv(ENV_MLFLOW_S3_ENDPOINT_URL) is None: - client.set_job_status( - job_id=job_id, experiment_id=experiment_id, status="failed" + client.experiments.jobs.set_status( + experiment_id=experiment_id, job_id=job_id, status="failed" ) log.error(f"{ENV_MLFLOW_S3_ENDPOINT_URL} environment variable is not set") raise ValueError( @@ -181,13 +89,13 @@ def run_v1_dioptra_job(job_id: int, experiment_id: int) -> None: # noqa: C901 # Use client to download the job files for the provided job_id try: - job_files_package = client.download_job_files( + job_files_package = client.workflows.download_job_files( job_id=job_id, output_dir=working_dir ) except Exception as e: - client.set_job_status( - job_id=job_id, experiment_id=experiment_id, status="failed" + client.experiments.jobs.set_status( + experiment_id=experiment_id, job_id=job_id, status="failed" ) log.exception("Could not download job files") raise e @@ -198,8 +106,8 @@ def run_v1_dioptra_job(job_id: int, experiment_id: int) -> None: # noqa: C901 tar.extractall(path=working_dir, filter="data") except Exception as e: - client.set_job_status( - job_id=job_id, experiment_id=experiment_id, status="failed" + client.experiments.jobs.set_status( + experiment_id=experiment_id, job_id=job_id, status="failed" ) log.exception("Could not extract from tar file") raise e @@ -210,8 +118,8 @@ def run_v1_dioptra_job(job_id: int, experiment_id: int) -> None: # noqa: C901 run_dioptra_job = importlib.import_module(run_dioptra_job_path.stem) except Exception as e: - client.set_job_status( - job_id=job_id, experiment_id=experiment_id, status="failed" + client.experiments.jobs.set_status( + experiment_id=experiment_id, job_id=job_id, status="failed" ) log.exception("Could not import run_dioptra_job.py") raise e @@ -224,13 +132,13 @@ def run_v1_dioptra_job(job_id: int, experiment_id: int) -> None: # noqa: C901 dioptra_client=client, logger=log, ) - client.set_job_status( - job_id=job_id, experiment_id=experiment_id, status="finished" + client.experiments.jobs.set_status( + experiment_id=experiment_id, job_id=job_id, status="finished" ) except Exception as e: - client.set_job_status( - job_id=job_id, experiment_id=experiment_id, status="failed" + client.experiments.jobs.set_status( + experiment_id=experiment_id, job_id=job_id, status="failed" ) log.exception("Error running job") raise e diff --git a/src/frontend/package-lock.json b/src/frontend/package-lock.json new file mode 100644 index 000000000..f34b55fa4 --- /dev/null +++ 
b/src/frontend/package-lock.json @@ -0,0 +1,3927 @@ +{ + "name": "frontend", + "version": "0.0.0", + "lockfileVersion": 3, + "requires": true, + "packages": { + "": { + "name": "frontend", + "version": "0.0.0", + "dependencies": { + "@codemirror/lang-python": "^6.1.6", + "@codemirror/lang-yaml": "^6.0.0", + "@codemirror/lint": "^6.5.0", + "@codemirror/theme-one-dark": "^6.1.2", + "@quasar/extras": "^1.16.9", + "axios": "^1.6.7", + "codemirror": "^6.0.1", + "js-yaml": "^4.1.0", + "pinia": "^2.1.7", + "quasar": "^2.14.2", + "vue": "^3.3.11", + "vue-codemirror": "^6.1.1", + "vue-router": "^4.2.5", + "yaml": "^2.5.0" + }, + "devDependencies": { + "@quasar/vite-plugin": "^1.6.0", + "@rushstack/eslint-patch": "^1.3.3", + "@tsconfig/node18": "^18.2.2", + "@types/node": "^18.19.3", + "@vitejs/plugin-vue": "^4.5.2", + "@vue/eslint-config-prettier": "^8.0.0", + "@vue/eslint-config-typescript": "^12.0.0", + "@vue/tsconfig": "^0.5.0", + "eslint": "^8.49.0", + "eslint-plugin-vue": "^9.17.0", + "npm-run-all2": "^6.1.1", + "prettier": "^3.0.3", + "sass": "^1.70.0", + "typescript": "~5.3.0", + "vite": "^5.0.10", + "vue-tsc": "^1.8.25" + } + }, + "node_modules/@babel/parser": { + "version": "7.24.8", + "resolved": "https://registry.npmjs.org/@babel/parser/-/parser-7.24.8.tgz", + "integrity": "sha512-WzfbgXOkGzZiXXCqk43kKwZjzwx4oulxZi3nq2TYL9mOjQv6kYwul9mz6ID36njuL7Xkp6nJEfok848Zj10j/w==", + "license": "MIT", + "bin": { + "parser": "bin/babel-parser.js" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/@codemirror/autocomplete": { + "version": "6.17.0", + "resolved": "https://registry.npmjs.org/@codemirror/autocomplete/-/autocomplete-6.17.0.tgz", + "integrity": "sha512-fdfj6e6ZxZf8yrkMHUSJJir7OJkHkZKaOZGzLWIYp2PZ3jd+d+UjG8zVPqJF6d3bKxkhvXTPan/UZ1t7Bqm0gA==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.17.0", + "@lezer/common": "^1.0.0" + }, + "peerDependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@codemirror/commands": { + "version": "6.6.0", + "resolved": "https://registry.npmjs.org/@codemirror/commands/-/commands-6.6.0.tgz", + "integrity": "sha512-qnY+b7j1UNcTS31Eenuc/5YJB6gQOzkUoNmJQc0rznwqSRpeaWWpjkWy2C/MPTcePpsKJEM26hXrOXl1+nceXg==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.4.0", + "@codemirror/view": "^6.27.0", + "@lezer/common": "^1.1.0" + } + }, + "node_modules/@codemirror/lang-python": { + "version": "6.1.6", + "resolved": "https://registry.npmjs.org/@codemirror/lang-python/-/lang-python-6.1.6.tgz", + "integrity": "sha512-ai+01WfZhWqM92UqjnvorkxosZ2aq2u28kHvr+N3gu012XqY2CThD67JPMHnGceRfXPDBmn1HnyqowdpF57bNg==", + "license": "MIT", + "dependencies": { + "@codemirror/autocomplete": "^6.3.2", + "@codemirror/language": "^6.8.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.2.1", + "@lezer/python": "^1.1.4" + } + }, + "node_modules/@codemirror/lang-yaml": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/@codemirror/lang-yaml/-/lang-yaml-6.1.1.tgz", + "integrity": "sha512-HV2NzbK9bbVnjWxwObuZh5FuPCowx51mEfoFT9y3y+M37fA3+pbxx4I7uePuygFzDsAmCTwQSc/kXh/flab4uw==", + "license": "MIT", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.2.0", + "@lezer/yaml": "^1.0.0" 
+ } + }, + "node_modules/@codemirror/language": { + "version": "6.10.2", + "resolved": "https://registry.npmjs.org/@codemirror/language/-/language-6.10.2.tgz", + "integrity": "sha512-kgbTYTo0Au6dCSc/TFy7fK3fpJmgHDv1sG1KNQKJXVi+xBTEeBPY/M30YXiU6mMXeH+YIDLsbrT4ZwNRdtF+SA==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.23.0", + "@lezer/common": "^1.1.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0", + "style-mod": "^4.0.0" + } + }, + "node_modules/@codemirror/lint": { + "version": "6.8.1", + "resolved": "https://registry.npmjs.org/@codemirror/lint/-/lint-6.8.1.tgz", + "integrity": "sha512-IZ0Y7S4/bpaunwggW2jYqwLuHj0QtESf5xcROewY6+lDNwZ/NzvR4t+vpYgg9m7V8UXLPYqG+lu3DF470E5Oxg==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/search": { + "version": "6.5.6", + "resolved": "https://registry.npmjs.org/@codemirror/search/-/search-6.5.6.tgz", + "integrity": "sha512-rpMgcsh7o0GuCDUXKPvww+muLA1pDJaFrpq/CCHtpQJYz8xopu4D1hPcKRoDD0YlF8gZaqTNIRa4VRBWyhyy7Q==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "crelt": "^1.0.5" + } + }, + "node_modules/@codemirror/state": { + "version": "6.4.1", + "resolved": "https://registry.npmjs.org/@codemirror/state/-/state-6.4.1.tgz", + "integrity": "sha512-QkEyUiLhsJoZkbumGZlswmAhA7CBU02Wrz7zvH4SrcifbsqwlXShVXg65f3v/ts57W3dqyamEriMhij1Z3Zz4A==", + "license": "MIT" + }, + "node_modules/@codemirror/theme-one-dark": { + "version": "6.1.2", + "resolved": "https://registry.npmjs.org/@codemirror/theme-one-dark/-/theme-one-dark-6.1.2.tgz", + "integrity": "sha512-F+sH0X16j/qFLMAfbciKTxVOwkdAS336b7AXTKOZhy8BR3eH/RelsnLgLFINrpST63mmN2OuwUt0W2ndUgYwUA==", + "license": "MIT", + "dependencies": { + "@codemirror/language": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0", + "@lezer/highlight": "^1.0.0" + } + }, + "node_modules/@codemirror/view": { + "version": "6.28.5", + "resolved": "https://registry.npmjs.org/@codemirror/view/-/view-6.28.5.tgz", + "integrity": "sha512-NkUtfUa1lV7Jqg5DfHE/uLl7jKyoymDkaueMQXzePYuezL7FwX3ATANy74iAGlHCGe25hBGB0R+I5dC5EZ5JBg==", + "license": "MIT", + "dependencies": { + "@codemirror/state": "^6.4.0", + "style-mod": "^4.1.0", + "w3c-keyname": "^2.2.4" + } + }, + "node_modules/@esbuild/aix-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/aix-ppc64/-/aix-ppc64-0.21.5.tgz", + "integrity": "sha512-1SDgH6ZSPTlggy1yI6+Dbkiz8xzpHJEVAlF/AM1tHPLsf5STom9rwtjE4hKAF20FfXXNTFqEYXyJNWh1GiZedQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "aix" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm/-/android-arm-0.21.5.tgz", + "integrity": "sha512-vCPvzSjpPHEi1siZdlvAlsPxXl7WbOVUBBAowWug4rJHb68Ox8KualB+1ocNvT5fjv6wpkX6o/iEpbDrf68zcg==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-arm64/-/android-arm64-0.21.5.tgz", + "integrity": "sha512-c0uX9VAUBQ7dTDCjq+wdyGLowMdtR/GoC2U5IYk/7D1H1JYC0qseD7+11iMP2mRLN9RcCMRcjC4YMclCzGwS/A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": 
"MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/android-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/android-x64/-/android-x64-0.21.5.tgz", + "integrity": "sha512-D7aPRUUNHRBwHxzxRvp856rjUHRFW1SdQATKXH2hqA0kAZb1hKmi02OpYRacl0TxIGz/ZmXWlbZgjwWYaCakTA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-arm64/-/darwin-arm64-0.21.5.tgz", + "integrity": "sha512-DwqXqZyuk5AiWWf3UfLiRDJ5EDd49zg6O9wclZ7kUMv2WRFr4HKjXp/5t8JZ11QbQfUS6/cRCKGwYhtNAY88kQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/darwin-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/darwin-x64/-/darwin-x64-0.21.5.tgz", + "integrity": "sha512-se/JjF8NlmKVG4kNIuyWMV/22ZaerB+qaSi5MdrXtd6R08kvs2qCN4C09miupktDitvh8jRFflwGFBQcxZRjbw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-arm64/-/freebsd-arm64-0.21.5.tgz", + "integrity": "sha512-5JcRxxRDUJLX8JXp/wcBCy3pENnCgBR9bN6JsY4OmhfUtIHe3ZW0mawA7+RDAcMLrMIZaf03NlQiX9DGyB8h4g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/freebsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/freebsd-x64/-/freebsd-x64-0.21.5.tgz", + "integrity": "sha512-J95kNBj1zkbMXtHVH29bBriQygMXqoVQOQYA+ISs0/2l3T9/kj42ow2mpqerRBxDJnmkUDCaQT/dfNXWX/ZZCQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "freebsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm/-/linux-arm-0.21.5.tgz", + "integrity": "sha512-bPb5AHZtbeNGjCKVZ9UGqGwo8EUu4cLq68E95A53KlxAPRmUyYv2D6F0uUI65XisGOL1hBP5mTronbgo+0bFcA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-arm64/-/linux-arm64-0.21.5.tgz", + "integrity": "sha512-ibKvmyYzKsBeX8d8I7MH/TMfWDXBF3db4qM6sy+7re0YXya+K1cem3on9XgdT2EQGMu4hQyZhan7TeQ8XkGp4Q==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ia32/-/linux-ia32-0.21.5.tgz", + "integrity": "sha512-YvjXDqLRqPDl2dvRODYmmhz4rPeVKYvppfGYKSNGdyZkA01046pLWyRKKI3ax8fbJoK5QbxblURkwK/MWY18Tg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-loong64": { + "version": "0.21.5", + "resolved": 
"https://registry.npmjs.org/@esbuild/linux-loong64/-/linux-loong64-0.21.5.tgz", + "integrity": "sha512-uHf1BmMG8qEvzdrzAqg2SIG/02+4/DHB6a9Kbya0XDvwDEKCoC8ZRWI5JJvNdUjtciBGFQ5PuBlpEOXQj+JQSg==", + "cpu": [ + "loong64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-mips64el": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-mips64el/-/linux-mips64el-0.21.5.tgz", + "integrity": "sha512-IajOmO+KJK23bj52dFSNCMsz1QP1DqM6cwLUv3W1QwyxkyIWecfafnI555fvSGqEKwjMXVLokcV5ygHW5b3Jbg==", + "cpu": [ + "mips64el" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-ppc64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-ppc64/-/linux-ppc64-0.21.5.tgz", + "integrity": "sha512-1hHV/Z4OEfMwpLO8rp7CvlhBDnjsC3CttJXIhBi+5Aj5r+MBvy4egg7wCbe//hSsT+RvDAG7s81tAvpL2XAE4w==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-riscv64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-riscv64/-/linux-riscv64-0.21.5.tgz", + "integrity": "sha512-2HdXDMd9GMgTGrPWnJzP2ALSokE/0O5HhTUvWIbD3YdjME8JwvSCnNGBnTThKGEB91OZhzrJ4qIIxk/SBmyDDA==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-s390x": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-s390x/-/linux-s390x-0.21.5.tgz", + "integrity": "sha512-zus5sxzqBJD3eXxwvjN1yQkRepANgxE9lgOW2qLnmr8ikMTphkjgXu1HR01K4FJg8h1kEEDAqDcZQtbrRnB41A==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/linux-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/linux-x64/-/linux-x64-0.21.5.tgz", + "integrity": "sha512-1rYdTpyv03iycF1+BhzrzQJCdOuAOtaqHTWJZCWvijKD2N5Xu0TtVC8/+1faWqcP9iBCWOmjmhoH94dH82BxPQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/netbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/netbsd-x64/-/netbsd-x64-0.21.5.tgz", + "integrity": "sha512-Woi2MXzXjMULccIwMnLciyZH4nCIMpWQAs049KEeMvOcNADVxo0UBIQPfSmxB3CWKedngg7sWZdLvLczpe0tLg==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "netbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/openbsd-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/openbsd-x64/-/openbsd-x64-0.21.5.tgz", + "integrity": "sha512-HLNNw99xsvx12lFBUwoT8EVCsSvRNDVxNpjZ7bPn947b8gJPzeHWyNVhFsaerc0n3TsbOINvRP2byTZ5LKezow==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "openbsd" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/sunos-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/sunos-x64/-/sunos-x64-0.21.5.tgz", + "integrity": "sha512-6+gjmFpfy0BHU5Tpptkuh8+uw3mnrvgs+dSPQXQOv3ekbordwnzTVEb4qnIvQcYXq6gzkyTnoZ9dZG+D4garKg==", + "cpu": [ + "x64" + ], + "dev": 
true, + "license": "MIT", + "optional": true, + "os": [ + "sunos" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-arm64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-arm64/-/win32-arm64-0.21.5.tgz", + "integrity": "sha512-Z0gOTd75VvXqyq7nsl93zwahcTROgqvuAcYDUr+vOv8uHhNSKROyU961kgtCD1e95IqPKSQKH7tBTslnS3tA8A==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-ia32": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-ia32/-/win32-ia32-0.21.5.tgz", + "integrity": "sha512-SWXFF1CL2RVNMaVs+BBClwtfZSvDgtL//G/smwAc5oVK/UPu2Gu9tIaRgFmYFFKrmg3SyAjSrElf0TiJ1v8fYA==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@esbuild/win32-x64": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/@esbuild/win32-x64/-/win32-x64-0.21.5.tgz", + "integrity": "sha512-tQd/1efJuzPC6rCFwEvLtci/xNFcTZknmXs98FYDfGE4wP9ClFV98nyKrzJKVPMhdDnjzLhdUyMX4PsQAPjwIw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ], + "engines": { + "node": ">=12" + } + }, + "node_modules/@eslint-community/eslint-utils": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/@eslint-community/eslint-utils/-/eslint-utils-4.4.0.tgz", + "integrity": "sha512-1/sA4dwrzBAyeUoQ6oxahHKmrZvsnLCg4RfxW3ZFGGmQkSNQPFNLV9CUEFQP1x9EYXHTo5p6xdhZM1Ne9p/AfA==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-visitor-keys": "^3.3.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.0.0 || ^7.0.0 || >=8.0.0" + } + }, + "node_modules/@eslint-community/regexpp": { + "version": "4.11.0", + "resolved": "https://registry.npmjs.org/@eslint-community/regexpp/-/regexpp-4.11.0.tgz", + "integrity": "sha512-G/M/tIiMrTAxEWRfLfQJMmGNX28IxBg4PBz8XqQhqUHLFI6TL2htpIB1iQCj144V5ee/JaKyT9/WZ0MGZWfA7A==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.0.0 || ^14.0.0 || >=16.0.0" + } + }, + "node_modules/@eslint/eslintrc": { + "version": "2.1.4", + "resolved": "https://registry.npmjs.org/@eslint/eslintrc/-/eslintrc-2.1.4.tgz", + "integrity": "sha512-269Z39MS6wVJtsoUl10L60WdkhJVdPG24Q4eZTH3nnF6lpvSShEK3wQjDX9JRWAUPvPh7COouPpU9IrqaZFvtQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "ajv": "^6.12.4", + "debug": "^4.3.2", + "espree": "^9.6.0", + "globals": "^13.19.0", + "ignore": "^5.2.0", + "import-fresh": "^3.2.1", + "js-yaml": "^4.1.0", + "minimatch": "^3.1.2", + "strip-json-comments": "^3.1.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/@eslint/eslintrc/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@eslint/eslintrc/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": 
"sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@eslint/js": { + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/@eslint/js/-/js-8.57.0.tgz", + "integrity": "sha512-Ys+3g2TaW7gADOJzPt83SJtCDhMjndcDMFVQ/Tj9iA1BfJzFKD9mAUXT3OenpuPHbI6P/myECxRJrofUsDx/5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + } + }, + "node_modules/@humanwhocodes/config-array": { + "version": "0.11.14", + "resolved": "https://registry.npmjs.org/@humanwhocodes/config-array/-/config-array-0.11.14.tgz", + "integrity": "sha512-3T8LkOmg45BV5FICb15QQMsyUSWrQ8AygVfC7ZG32zOalnqrilm018ZVCw0eapXux8FtA33q8PSRSstjee3jSg==", + "deprecated": "Use @eslint/config-array instead", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "@humanwhocodes/object-schema": "^2.0.2", + "debug": "^4.3.1", + "minimatch": "^3.0.5" + }, + "engines": { + "node": ">=10.10.0" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/@humanwhocodes/config-array/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/@humanwhocodes/module-importer": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/@humanwhocodes/module-importer/-/module-importer-1.0.1.tgz", + "integrity": "sha512-bxveV4V8v5Yb4ncFTT3rPSgZBOpCkjfK0y4oVVVJwIuDVBRMDXrPyXRL988i5ap9m9bnyEEjWfm5WkBmtffLfA==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12.22" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/nzakas" + } + }, + "node_modules/@humanwhocodes/object-schema": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/@humanwhocodes/object-schema/-/object-schema-2.0.3.tgz", + "integrity": "sha512-93zYdMES/c1D69yZiKDBj0V24vqNzB/koF26KPaagAfd3P/4gUlh3Dys5ogAK+Exi9QyzlD8x/08Zt7wIKcDcA==", + "deprecated": "Use @eslint/object-schema instead", + "dev": true, + "license": "BSD-3-Clause" + }, + "node_modules/@jridgewell/sourcemap-codec": { + "version": "1.5.0", + "resolved": "https://registry.npmjs.org/@jridgewell/sourcemap-codec/-/sourcemap-codec-1.5.0.tgz", + "integrity": "sha512-gv3ZRaISU3fjPAgNsriBRqGWQL6quFx04YMPW/zD8XMLsU32mhCCbfbO6KZFLjvYpCZ8zyDEgqsgf+PwPaM7GQ==", + "license": "MIT" + }, + "node_modules/@lezer/common": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/@lezer/common/-/common-1.2.1.tgz", + "integrity": "sha512-yemX0ZD2xS/73llMZIK6KplkjIjf2EvAHcinDi/TfJ9hS25G0388+ClHt6/3but0oOxinTcQHJLDXh6w1crzFQ==", + "license": "MIT" + }, + "node_modules/@lezer/highlight": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@lezer/highlight/-/highlight-1.2.0.tgz", + "integrity": 
"sha512-WrS5Mw51sGrpqjlh3d4/fOwpEV2Hd3YOkp9DBt4k8XZQcoTHZFB7sx030A6OcahF4J1nDQAa3jXlTVVYH50IFA==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lezer/lr": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/@lezer/lr/-/lr-1.4.1.tgz", + "integrity": "sha512-CHsKq8DMKBf9b3yXPDIU4DbH+ZJd/sJdYOW2llbW/HudP5u0VS6Bfq1hLYfgU7uAYGFIyGGQIsSOXGPEErZiJw==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.0.0" + } + }, + "node_modules/@lezer/python": { + "version": "1.1.14", + "resolved": "https://registry.npmjs.org/@lezer/python/-/python-1.1.14.tgz", + "integrity": "sha512-ykDOb2Ti24n76PJsSa4ZoDF0zH12BSw1LGfQXCYJhJyOGiFTfGaX0Du66Ze72R+u/P35U+O6I9m8TFXov1JzsA==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.0.0" + } + }, + "node_modules/@lezer/yaml": { + "version": "1.0.3", + "resolved": "https://registry.npmjs.org/@lezer/yaml/-/yaml-1.0.3.tgz", + "integrity": "sha512-GuBLekbw9jDBDhGur82nuwkxKQ+a3W5H0GfaAthDXcAu+XdpS43VlnxA9E9hllkpSP5ellRDKjLLj7Lu9Wr6xA==", + "license": "MIT", + "dependencies": { + "@lezer/common": "^1.2.0", + "@lezer/highlight": "^1.0.0", + "@lezer/lr": "^1.4.0" + } + }, + "node_modules/@nodelib/fs.scandir": { + "version": "2.1.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.scandir/-/fs.scandir-2.1.5.tgz", + "integrity": "sha512-vq24Bq3ym5HEQm2NKCr3yXDwjc7vTsEThRDnkp2DK9p1uqLR+DHurm/NOTo0KG7HYHU7eppKZj3MyqYuMBf62g==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "2.0.5", + "run-parallel": "^1.1.9" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.stat": { + "version": "2.0.5", + "resolved": "https://registry.npmjs.org/@nodelib/fs.stat/-/fs.stat-2.0.5.tgz", + "integrity": "sha512-RkhPPp2zrqDAQA/2jNhnztcPAlv64XdhIp7a7454A5ovI7Bukxgt7MX7udwAu3zg1DcpPU0rz3VV1SeaqvY4+A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/@nodelib/fs.walk": { + "version": "1.2.8", + "resolved": "https://registry.npmjs.org/@nodelib/fs.walk/-/fs.walk-1.2.8.tgz", + "integrity": "sha512-oGB+UxlgWcgQkgwo8GcEGwemoTFt3FIO9ababBmaGwXIoBKZ+GTy0pP185beGg7Llih/NSHSV2XAs1lnznocSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.scandir": "2.1.5", + "fastq": "^1.6.0" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/@pkgr/core": { + "version": "0.1.1", + "resolved": "https://registry.npmjs.org/@pkgr/core/-/core-0.1.1.tgz", + "integrity": "sha512-cq8o4cWH0ibXh9VGi5P20Tu9XF/0fFXl9EUinr9QfTM7a7p0oTA4iJRCQWppXR1Pg8dSM0UCItCkPwsk9qWWYA==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^12.20.0 || ^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, + "node_modules/@quasar/extras": { + "version": "1.16.12", + "resolved": "https://registry.npmjs.org/@quasar/extras/-/extras-1.16.12.tgz", + "integrity": "sha512-hLlb3Buxo38Xg/2w0BTkz98RBh/VH8apZ2r6Fl8YpPgrVQ0diHyN/BVTvIOk5Kch2y38L2kvwOIddsB2UcCuIg==", + "license": "MIT", + "funding": { + "type": "github", + "url": "https://donate.quasar.dev" + } + }, + "node_modules/@quasar/vite-plugin": { + "version": "1.7.0", + "resolved": "https://registry.npmjs.org/@quasar/vite-plugin/-/vite-plugin-1.7.0.tgz", + "integrity": "sha512-ia4w1n4DuPYm92MQLPNpMqLJID1WGGRyVGxkVeg8V+V25Vh3p9QBo++iuXR4sW/bCmzzx66Ko6VStsr1zp90GQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=18" + }, + "funding": { + "type": "github", + "url": 
"https://donate.quasar.dev" + }, + "peerDependencies": { + "@vitejs/plugin-vue": "^2.0.0 || ^3.0.0 || ^4.0.0 || ^5.0.0", + "quasar": "^2.16.0", + "vite": "^2.0.0 || ^3.0.0 || ^4.0.0 || ^5.0.0", + "vue": "^3.0.0" + } + }, + "node_modules/@rollup/rollup-android-arm-eabi": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm-eabi/-/rollup-android-arm-eabi-4.18.1.tgz", + "integrity": "sha512-lncuC4aHicncmbORnx+dUaAgzee9cm/PbIqgWz1PpXuwc+sa1Ct83tnqUDy/GFKleLiN7ZIeytM6KJ4cAn1SxA==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-android-arm64": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-android-arm64/-/rollup-android-arm64-4.18.1.tgz", + "integrity": "sha512-F/tkdw0WSs4ojqz5Ovrw5r9odqzFjb5LIgHdHZG65dFI1lWTWRVy32KDJLKRISHgJvqUeUhdIvy43fX41znyDg==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "android" + ] + }, + "node_modules/@rollup/rollup-darwin-arm64": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-arm64/-/rollup-darwin-arm64-4.18.1.tgz", + "integrity": "sha512-vk+ma8iC1ebje/ahpxpnrfVQJibTMyHdWpOGZ3JpQ7Mgn/3QNHmPq7YwjZbIE7km73dH5M1e6MRRsnEBW7v5CQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-darwin-x64": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-darwin-x64/-/rollup-darwin-x64-4.18.1.tgz", + "integrity": "sha512-IgpzXKauRe1Tafcej9STjSSuG0Ghu/xGYH+qG6JwsAUxXrnkvNHcq/NL6nz1+jzvWAnQkuAJ4uIwGB48K9OCGA==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ] + }, + "node_modules/@rollup/rollup-linux-arm-gnueabihf": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-gnueabihf/-/rollup-linux-arm-gnueabihf-4.18.1.tgz", + "integrity": "sha512-P9bSiAUnSSM7EmyRK+e5wgpqai86QOSv8BwvkGjLwYuOpaeomiZWifEos517CwbG+aZl1T4clSE1YqqH2JRs+g==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm-musleabihf": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm-musleabihf/-/rollup-linux-arm-musleabihf-4.18.1.tgz", + "integrity": "sha512-5RnjpACoxtS+aWOI1dURKno11d7krfpGDEn19jI8BuWmSBbUC4ytIADfROM1FZrFhQPSoP+KEa3NlEScznBTyQ==", + "cpu": [ + "arm" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-gnu": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-gnu/-/rollup-linux-arm64-gnu-4.18.1.tgz", + "integrity": "sha512-8mwmGD668m8WaGbthrEYZ9CBmPug2QPGWxhJxh/vCgBjro5o96gL04WLlg5BA233OCWLqERy4YUzX3bJGXaJgQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-arm64-musl": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-arm64-musl/-/rollup-linux-arm64-musl-4.18.1.tgz", + "integrity": "sha512-dJX9u4r4bqInMGOAQoGYdwDP8lQiisWb9et+T84l2WXk41yEej8v2iGKodmdKimT8cTAYt0jFb+UEBxnPkbXEQ==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-powerpc64le-gnu": { + 
"version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-powerpc64le-gnu/-/rollup-linux-powerpc64le-gnu-4.18.1.tgz", + "integrity": "sha512-V72cXdTl4EI0x6FNmho4D502sy7ed+LuVW6Ym8aI6DRQ9hQZdp5sj0a2usYOlqvFBNKQnLQGwmYnujo2HvjCxQ==", + "cpu": [ + "ppc64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-riscv64-gnu": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-riscv64-gnu/-/rollup-linux-riscv64-gnu-4.18.1.tgz", + "integrity": "sha512-f+pJih7sxoKmbjghrM2RkWo2WHUW8UbfxIQiWo5yeCaCM0TveMEuAzKJte4QskBp1TIinpnRcxkquY+4WuY/tg==", + "cpu": [ + "riscv64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-s390x-gnu": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-s390x-gnu/-/rollup-linux-s390x-gnu-4.18.1.tgz", + "integrity": "sha512-qb1hMMT3Fr/Qz1OKovCuUM11MUNLUuHeBC2DPPAWUYYUAOFWaxInaTwTQmc7Fl5La7DShTEpmYwgdt2hG+4TEg==", + "cpu": [ + "s390x" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-gnu": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-gnu/-/rollup-linux-x64-gnu-4.18.1.tgz", + "integrity": "sha512-7O5u/p6oKUFYjRbZkL2FLbwsyoJAjyeXHCU3O4ndvzg2OFO2GinFPSJFGbiwFDaCFc+k7gs9CF243PwdPQFh5g==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-linux-x64-musl": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-linux-x64-musl/-/rollup-linux-x64-musl-4.18.1.tgz", + "integrity": "sha512-pDLkYITdYrH/9Cv/Vlj8HppDuLMDUBmgsM0+N+xLtFd18aXgM9Nyqupb/Uw+HeidhfYg2lD6CXvz6CjoVOaKjQ==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "linux" + ] + }, + "node_modules/@rollup/rollup-win32-arm64-msvc": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-arm64-msvc/-/rollup-win32-arm64-msvc-4.18.1.tgz", + "integrity": "sha512-W2ZNI323O/8pJdBGil1oCauuCzmVd9lDmWBBqxYZcOqWD6aWqJtVBQ1dFrF4dYpZPks6F+xCZHfzG5hYlSHZ6g==", + "cpu": [ + "arm64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-ia32-msvc": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-ia32-msvc/-/rollup-win32-ia32-msvc-4.18.1.tgz", + "integrity": "sha512-ELfEX1/+eGZYMaCIbK4jqLxO1gyTSOIlZr6pbC4SRYFaSIDVKOnZNMdoZ+ON0mrFDp4+H5MhwNC1H/AhE3zQLg==", + "cpu": [ + "ia32" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rollup/rollup-win32-x64-msvc": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/@rollup/rollup-win32-x64-msvc/-/rollup-win32-x64-msvc-4.18.1.tgz", + "integrity": "sha512-yjk2MAkQmoaPYCSu35RLJ62+dz358nE83VfTePJRp8CG7aMg25mEJYpXFiD+NcevhX8LxD5OP5tktPXnXN7GDw==", + "cpu": [ + "x64" + ], + "dev": true, + "license": "MIT", + "optional": true, + "os": [ + "win32" + ] + }, + "node_modules/@rushstack/eslint-patch": { + "version": "1.10.3", + "resolved": "https://registry.npmjs.org/@rushstack/eslint-patch/-/eslint-patch-1.10.3.tgz", + "integrity": "sha512-qC/xYId4NMebE6w/V33Fh9gWxLgURiNYgVNObbJl2LZv0GUUItCcCqC5axQSwRaAgaxl2mELq1rMzlswaQ0Zxg==", + "dev": true, + "license": "MIT" + }, + 
"node_modules/@tsconfig/node18": { + "version": "18.2.4", + "resolved": "https://registry.npmjs.org/@tsconfig/node18/-/node18-18.2.4.tgz", + "integrity": "sha512-5xxU8vVs9/FNcvm3gE07fPbn9tl6tqGGWA9tSlwsUEkBxtRnTsNmwrV8gasZ9F/EobaSv9+nu8AxUKccw77JpQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/estree": { + "version": "1.0.5", + "resolved": "https://registry.npmjs.org/@types/estree/-/estree-1.0.5.tgz", + "integrity": "sha512-/kYRxGDLWzHOB7q+wtSUQlFrtcdUccpfy+X+9iMBpHK8QLLhx2wIPYuS5DYtR9Wa/YlZAbIovy7qVdB1Aq6Lyw==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/json-schema": { + "version": "7.0.15", + "resolved": "https://registry.npmjs.org/@types/json-schema/-/json-schema-7.0.15.tgz", + "integrity": "sha512-5+fP8P8MFNC+AyZCDxrB2pkZFPGzqQWUzpSeuuVLvm8VMcorNYavBqoFcxK8bQz4Qsbn4oUEEem4wDLfcysGHA==", + "dev": true, + "license": "MIT" + }, + "node_modules/@types/node": { + "version": "18.19.40", + "resolved": "https://registry.npmjs.org/@types/node/-/node-18.19.40.tgz", + "integrity": "sha512-MIxieZHrm4Ee8XArBIc+Or9HINt2StOmCbgRcXGSJl8q14svRvkZPe7LJq9HKtTI1SK3wU8b91TjntUm7T69Pg==", + "dev": true, + "license": "MIT", + "dependencies": { + "undici-types": "~5.26.4" + } + }, + "node_modules/@types/semver": { + "version": "7.5.8", + "resolved": "https://registry.npmjs.org/@types/semver/-/semver-7.5.8.tgz", + "integrity": "sha512-I8EUhyrgfLrcTkzV3TSsGyl1tSuPrEDzr0yd5m90UgNxQkyDXULk3b6MlQqTCpZpNtWe1K0hzclnZkTcLBe2UQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/@typescript-eslint/eslint-plugin": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/eslint-plugin/-/eslint-plugin-6.21.0.tgz", + "integrity": "sha512-oy9+hTPCUFpngkEZUSzbf9MxI65wbKFoQYsgPdILTfbUldp5ovUuphZVe4i30emU9M/kP+T64Di0mxl7dSw3MA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/regexpp": "^4.5.1", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/type-utils": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "graphemer": "^1.4.0", + "ignore": "^5.2.4", + "natural-compare": "^1.4.0", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "@typescript-eslint/parser": "^6.0.0 || ^6.0.0-alpha", + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/parser": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/parser/-/parser-6.21.0.tgz", + "integrity": "sha512-tbsV1jPne5CkFQCgPBcDOt30ItF7aJoZL997JSF7MhGQqOeT3svWRYxiqlfA5RUdlHN6Fi+EI9bxqbdyAUZjYQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/scope-manager": { + "version": "6.21.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/scope-manager/-/scope-manager-6.21.0.tgz", + "integrity": "sha512-OwLUIWZJry80O99zvqXVEioyniJMa+d2GrqpUTqi5/v5D5rOrppJVBPa0yKCblcigC0/aYAzxxqQ1B+DS2RYsg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/type-utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/type-utils/-/type-utils-6.21.0.tgz", + "integrity": "sha512-rZQI7wHfao8qMX3Rd3xqeYSMCL3SoiSQLBATSiVKARdFGCYSRvmViieZjqc58jKgs8Y8i9YvVVhRbHSTA4VBag==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/typescript-estree": "6.21.0", + "@typescript-eslint/utils": "6.21.0", + "debug": "^4.3.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/types": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/types/-/types-6.21.0.tgz", + "integrity": "sha512-1kFmZ1rOm5epu9NZEZm1kckCDGj5UJEf7P1kliH4LKu/RkwpsfqqGmY2OOcUs18lSlQBKLDYBOGxRVtrMN5lpg==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@typescript-eslint/typescript-estree": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/typescript-estree/-/typescript-estree-6.21.0.tgz", + "integrity": "sha512-6npJTkZcO+y2/kr+z0hc4HwNfrrP4kNYh57ek7yCNlrBjWQ1Y0OS7jiZTkgumrvkX5HkEKXFZkkdFNkaW2wmUQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/visitor-keys": "6.21.0", + "debug": "^4.3.4", + "globby": "^11.1.0", + "is-glob": "^4.0.3", + "minimatch": "9.0.3", + "semver": "^7.5.4", + "ts-api-utils": "^1.0.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@typescript-eslint/utils": { + "version": "6.21.0", + "resolved": "https://registry.npmjs.org/@typescript-eslint/utils/-/utils-6.21.0.tgz", + "integrity": "sha512-NfWVaC8HP9T8cbKQxHcsJBY5YE1O33+jpMwN45qzWWaPDZgLIbo12toGMWnmhvCpd3sIxkpDw3Wv1B3dYrbDQQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "@types/json-schema": "^7.0.12", + "@types/semver": "^7.5.0", + "@typescript-eslint/scope-manager": "6.21.0", + "@typescript-eslint/types": "6.21.0", + "@typescript-eslint/typescript-estree": "6.21.0", + "semver": "^7.5.4" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + }, + "peerDependencies": { + "eslint": "^7.0.0 || ^8.0.0" + } + }, + "node_modules/@typescript-eslint/visitor-keys": { + "version": "6.21.0", + "resolved": 
"https://registry.npmjs.org/@typescript-eslint/visitor-keys/-/visitor-keys-6.21.0.tgz", + "integrity": "sha512-JJtkDduxLi9bivAB+cYOVMtbkqdPOhZ+ZI5LC47MIRrDV4Yn2o+ZnW10Nkmr28xRpSpdJ6Sm42Hjf2+REYXm0A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/types": "6.21.0", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^16.0.0 || >=18.0.0" + }, + "funding": { + "type": "opencollective", + "url": "https://opencollective.com/typescript-eslint" + } + }, + "node_modules/@ungap/structured-clone": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/@ungap/structured-clone/-/structured-clone-1.2.0.tgz", + "integrity": "sha512-zuVdFrMJiuCDQUMCzQaD6KL28MjnqqN8XnAqiEq9PNm/hCPTSGfrXCOfwj1ow4LFb/tNymJPwsNbVePc1xFqrQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/@vitejs/plugin-vue": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/@vitejs/plugin-vue/-/plugin-vue-4.6.2.tgz", + "integrity": "sha512-kqf7SGFoG+80aZG6Pf+gsZIVvGSCKE98JbiWqcCV9cThtg91Jav0yvYFC9Zb+jKetNGF6ZKeoaxgZfND21fWKw==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "peerDependencies": { + "vite": "^4.0.0 || ^5.0.0", + "vue": "^3.2.25" + } + }, + "node_modules/@volar/language-core": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@volar/language-core/-/language-core-1.11.1.tgz", + "integrity": "sha512-dOcNn3i9GgZAcJt43wuaEykSluAuOkQgzni1cuxLxTV0nJKanQztp7FxyswdRILaKH+P2XZMPRp2S4MV/pElCw==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/source-map": "1.11.1" + } + }, + "node_modules/@volar/source-map": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@volar/source-map/-/source-map-1.11.1.tgz", + "integrity": "sha512-hJnOnwZ4+WT5iupLRnuzbULZ42L7BWWPMmruzwtLhJfpDVoZLjNBxHDi2sY2bgZXCKlpU5XcsMFoYrsQmPhfZg==", + "dev": true, + "license": "MIT", + "dependencies": { + "muggle-string": "^0.3.1" + } + }, + "node_modules/@volar/typescript": { + "version": "1.11.1", + "resolved": "https://registry.npmjs.org/@volar/typescript/-/typescript-1.11.1.tgz", + "integrity": "sha512-iU+t2mas/4lYierSnoFOeRFQUhAEMgsFuQxoxvwn5EdQopw43j+J27a4lt9LMInx1gLJBC6qL14WYGlgymaSMQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/language-core": "1.11.1", + "path-browserify": "^1.0.1" + } + }, + "node_modules/@vue/compiler-core": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/compiler-core/-/compiler-core-3.4.32.tgz", + "integrity": "sha512-8tCVWkkLe/QCWIsrIvExUGnhYCAOroUs5dzhSoKL5w4MJS8uIYiou+pOPSVIOALOQ80B0jBs+Ri+kd5+MBnCDw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.24.7", + "@vue/shared": "3.4.32", + "entities": "^4.5.0", + "estree-walker": "^2.0.2", + "source-map-js": "^1.2.0" + } + }, + "node_modules/@vue/compiler-dom": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/compiler-dom/-/compiler-dom-3.4.32.tgz", + "integrity": "sha512-PbSgt9KuYo4fyb90dynuPc0XFTfFPs3sCTbPLOLlo+PrUESW1gn/NjSsUvhR+mI2AmmEzexwYMxbHDldxSOr2A==", + "license": "MIT", + "dependencies": { + "@vue/compiler-core": "3.4.32", + "@vue/shared": "3.4.32" + } + }, + "node_modules/@vue/compiler-sfc": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/compiler-sfc/-/compiler-sfc-3.4.32.tgz", + "integrity": "sha512-STy9im/WHfaguJnfKjjVpMHukxHUrOKjm2vVCxiojQJyo3Sb6Os8SMXBr/MI+ekpstEGkDONfqAQoSbZhspLYw==", + "license": "MIT", + "dependencies": { + "@babel/parser": "^7.24.7", + "@vue/compiler-core": "3.4.32", + 
"@vue/compiler-dom": "3.4.32", + "@vue/compiler-ssr": "3.4.32", + "@vue/shared": "3.4.32", + "estree-walker": "^2.0.2", + "magic-string": "^0.30.10", + "postcss": "^8.4.39", + "source-map-js": "^1.2.0" + } + }, + "node_modules/@vue/compiler-ssr": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/compiler-ssr/-/compiler-ssr-3.4.32.tgz", + "integrity": "sha512-nyu/txTecF6DrxLrpLcI34xutrvZPtHPBj9yRoPxstIquxeeyywXpYZrQMsIeDfBhlw1abJb9CbbyZvDw2kjdg==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.4.32", + "@vue/shared": "3.4.32" + } + }, + "node_modules/@vue/devtools-api": { + "version": "6.6.3", + "resolved": "https://registry.npmjs.org/@vue/devtools-api/-/devtools-api-6.6.3.tgz", + "integrity": "sha512-0MiMsFma/HqA6g3KLKn+AGpL1kgKhFWszC9U29NfpWK5LE7bjeXxySWJrOJ77hBz+TBrBQ7o4QJqbPbqbs8rJw==", + "license": "MIT" + }, + "node_modules/@vue/eslint-config-prettier": { + "version": "8.0.0", + "resolved": "https://registry.npmjs.org/@vue/eslint-config-prettier/-/eslint-config-prettier-8.0.0.tgz", + "integrity": "sha512-55dPqtC4PM/yBjhAr+yEw6+7KzzdkBuLmnhBrDfp4I48+wy+Giqqj9yUr5T2uD/BkBROjjmqnLZmXRdOx/VtQg==", + "dev": true, + "license": "MIT", + "dependencies": { + "eslint-config-prettier": "^8.8.0", + "eslint-plugin-prettier": "^5.0.0" + }, + "peerDependencies": { + "eslint": ">= 8.0.0", + "prettier": ">= 3.0.0" + } + }, + "node_modules/@vue/eslint-config-typescript": { + "version": "12.0.0", + "resolved": "https://registry.npmjs.org/@vue/eslint-config-typescript/-/eslint-config-typescript-12.0.0.tgz", + "integrity": "sha512-StxLFet2Qe97T8+7L8pGlhYBBr8Eg05LPuTDVopQV6il+SK6qqom59BA/rcFipUef2jD8P2X44Vd8tMFytfvlg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@typescript-eslint/eslint-plugin": "^6.7.0", + "@typescript-eslint/parser": "^6.7.0", + "vue-eslint-parser": "^9.3.1" + }, + "engines": { + "node": "^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0", + "eslint-plugin-vue": "^9.0.0", + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@vue/language-core": { + "version": "1.8.27", + "resolved": "https://registry.npmjs.org/@vue/language-core/-/language-core-1.8.27.tgz", + "integrity": "sha512-L8Kc27VdQserNaCUNiSFdDl9LWT24ly8Hpwf1ECy3aFb9m6bDhBGQYOujDm21N7EW3moKIOKEanQwe1q5BK+mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/language-core": "~1.11.1", + "@volar/source-map": "~1.11.1", + "@vue/compiler-dom": "^3.3.0", + "@vue/shared": "^3.3.0", + "computeds": "^0.0.1", + "minimatch": "^9.0.3", + "muggle-string": "^0.3.1", + "path-browserify": "^1.0.1", + "vue-template-compiler": "^2.7.14" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/@vue/reactivity": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/reactivity/-/reactivity-3.4.32.tgz", + "integrity": "sha512-1P7QvghAzhSIWmiNmh4MNkLVjr2QTNDcFv2sKmytEWhR6t7BZzNicgm5ENER4uU++wbWxgRh/pSEYgdI3MDcvg==", + "license": "MIT", + "dependencies": { + "@vue/shared": "3.4.32" + } + }, + "node_modules/@vue/runtime-core": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/runtime-core/-/runtime-core-3.4.32.tgz", + "integrity": "sha512-FxT2dTHUs1Hki8Ui/B1Hu339mx4H5kRJooqrNM32tGUHBPStJxwMzLIRbeGO/B1NMplU4Pg9fwOqrJtrOzkdfA==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.4.32", + "@vue/shared": "3.4.32" + } + }, + 
"node_modules/@vue/runtime-dom": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/runtime-dom/-/runtime-dom-3.4.32.tgz", + "integrity": "sha512-Xz9G+ZViRyPFQtRBCPFkhMzKn454ihCPMKUiacNaUhuTIXvyfkAq8l89IZ/kegFVyw/7KkJGRGqYdEZrf27Xsg==", + "license": "MIT", + "dependencies": { + "@vue/reactivity": "3.4.32", + "@vue/runtime-core": "3.4.32", + "@vue/shared": "3.4.32", + "csstype": "^3.1.3" + } + }, + "node_modules/@vue/server-renderer": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/server-renderer/-/server-renderer-3.4.32.tgz", + "integrity": "sha512-3c4rd0522Ao8hKjzgmUAbcjv2mBnvnw0Ld2f8HOMCuWJZjYie/p8cpIoYJbeP0VV2JYmrJJMwGQDO5RH4iQ30A==", + "license": "MIT", + "dependencies": { + "@vue/compiler-ssr": "3.4.32", + "@vue/shared": "3.4.32" + }, + "peerDependencies": { + "vue": "3.4.32" + } + }, + "node_modules/@vue/shared": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/@vue/shared/-/shared-3.4.32.tgz", + "integrity": "sha512-ep4mF1IVnX/pYaNwxwOpJHyBtOMKWoKZMbnUyd+z0udqIxLUh7YCCd/JfDna8aUrmnG9SFORyIq2HzEATRrQsg==", + "license": "MIT" + }, + "node_modules/@vue/tsconfig": { + "version": "0.5.1", + "resolved": "https://registry.npmjs.org/@vue/tsconfig/-/tsconfig-0.5.1.tgz", + "integrity": "sha512-VcZK7MvpjuTPx2w6blwnwZAu5/LgBUtejFOi3pPGQFXQN5Ela03FUtd2Qtg4yWGGissVL0dr6Ro1LfOFh+PCuQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/acorn": { + "version": "8.12.1", + "resolved": "https://registry.npmjs.org/acorn/-/acorn-8.12.1.tgz", + "integrity": "sha512-tcpGyI9zbizT9JbV6oYE477V6mTlXvvi0T0G3SNIYE2apm/G5huBa1+K89VGeovbg+jycCrfhl3ADxErOuO6Jg==", + "dev": true, + "license": "MIT", + "bin": { + "acorn": "bin/acorn" + }, + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/acorn-jsx": { + "version": "5.3.2", + "resolved": "https://registry.npmjs.org/acorn-jsx/-/acorn-jsx-5.3.2.tgz", + "integrity": "sha512-rq9s+JNhf0IChjtDXxllJ7g41oZk5SlXtp0LHwyA5cejwn7vKmKp4pPri6YEePv2PU65sAsegbXtIinmDFDXgQ==", + "dev": true, + "license": "MIT", + "peerDependencies": { + "acorn": "^6.0.0 || ^7.0.0 || ^8.0.0" + } + }, + "node_modules/ajv": { + "version": "6.12.6", + "resolved": "https://registry.npmjs.org/ajv/-/ajv-6.12.6.tgz", + "integrity": "sha512-j3fVLgvTo527anyYyJOGTYJbG+vnnQYvE0m5mmkc1TK+nxAppkCLMIL0aZ4dblVCNoGShhm+kzE4ZUykBoMg4g==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-deep-equal": "^3.1.1", + "fast-json-stable-stringify": "^2.0.0", + "json-schema-traverse": "^0.4.1", + "uri-js": "^4.2.2" + }, + "funding": { + "type": "github", + "url": "https://github.com/sponsors/epoberezkin" + } + }, + "node_modules/ansi-regex": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/ansi-regex/-/ansi-regex-5.0.1.tgz", + "integrity": "sha512-quJQXlTSUGL2LH9SUXo8VwsY4soanhgo6LNSm84E1LBcE8s3O0wpdiRzyR9z/ZZJMlMWv37qOOb9pdJlMUEKFQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/ansi-styles": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-4.3.0.tgz", + "integrity": "sha512-zbB9rCJAT1rbjiVDb2hqKFHNYLxgtk8NURxZ3IZwD3F6NtxbXZQCnnSi1Lkx+IDohdPlFp222wVALIheZJQSEg==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-convert": "^2.0.1" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/anymatch": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/anymatch/-/anymatch-3.1.3.tgz", + "integrity": 
"sha512-KMReFUr0B4t+D+OBkjR3KYqvocp2XaSzO55UcB6mgQMd3KbcE+mWTyvVV7D/zsdEbNnV6acZUutkiHQXvTr1Rw==", + "dev": true, + "license": "ISC", + "dependencies": { + "normalize-path": "^3.0.0", + "picomatch": "^2.0.4" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/argparse": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/argparse/-/argparse-2.0.1.tgz", + "integrity": "sha512-8+9WqebbFzpX9OR+Wa6O29asIogeRMzcGtAINdpMHHyAg10f05aSFVBbcEqGf/PXw1EjAZ+q2/bEBg3DvurK3Q==", + "license": "Python-2.0" + }, + "node_modules/array-union": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/array-union/-/array-union-2.1.0.tgz", + "integrity": "sha512-HGyxoOTYUyCM6stUe6EJgnd4EoewAI7zMdfqO+kGjnlZmBDz/cR5pf8r/cR4Wq60sL/p0IkcjUEEPwS3GFrIyw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/asynckit": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/asynckit/-/asynckit-0.4.0.tgz", + "integrity": "sha512-Oei9OH4tRh0YqU3GxhX79dM/mwVgvbZJaSNaRk+bshkj0S5cfHcgYakreBjrHwatXKbz+IoIdYLxrKim2MjW0Q==", + "license": "MIT" + }, + "node_modules/axios": { + "version": "1.7.5", + "resolved": "https://registry.npmjs.org/axios/-/axios-1.7.5.tgz", + "integrity": "sha512-fZu86yCo+svH3uqJ/yTdQ0QHpQu5oL+/QE+QPSv6BZSkDAoky9vytxp7u5qk83OJFS3kEBcesWni9WTZAv3tSw==", + "license": "MIT", + "dependencies": { + "follow-redirects": "^1.15.6", + "form-data": "^4.0.0", + "proxy-from-env": "^1.1.0" + } + }, + "node_modules/balanced-match": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/balanced-match/-/balanced-match-1.0.2.tgz", + "integrity": "sha512-3oSeUO0TMV67hN1AmbXsK4yaqU7tjiHlbxRDZOpH0KW9+CeX4bRAaX0Anxt0tx2MrpRpWwQaPwIlISEJhYU5Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/binary-extensions": { + "version": "2.3.0", + "resolved": "https://registry.npmjs.org/binary-extensions/-/binary-extensions-2.3.0.tgz", + "integrity": "sha512-Ceh+7ox5qe7LJuLHoY0feh3pHuUDHAcRUeyL2VYghZwfpkNIy/+8Ocg0a3UuSoYzavmylwuLWQOf3hl0jjMMIw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/boolbase": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/boolbase/-/boolbase-1.0.0.tgz", + "integrity": "sha512-JZOSA7Mo9sNGB8+UjSgzdLtokWAky1zbztM3WRLCbZ70/3cTANmQmOdR7y2g+J0e2WXywy1yS468tY+IruqEww==", + "dev": true, + "license": "ISC" + }, + "node_modules/brace-expansion": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-2.0.1.tgz", + "integrity": "sha512-XnAIvQ8eM+kC6aULx6wuQiwVsnzsi9d3WxzV3FpWTGA19F621kwdbsAcFKXgKUHZWsy+mY6iL1sHTxWEFCytDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0" + } + }, + "node_modules/braces": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/braces/-/braces-3.0.3.tgz", + "integrity": "sha512-yQbXgO/OSZVD2IsiLlro+7Hf6Q18EJrKSEsdoMzKePKXct3gvD8oLcOQdIzGupr5Fj+EDe8gO/lxc1BzfMpxvA==", + "dev": true, + "license": "MIT", + "dependencies": { + "fill-range": "^7.1.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/callsites": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/callsites/-/callsites-3.1.0.tgz", + "integrity": "sha512-P8BjAsXvZS+VIDUI11hHCQEv74YT67YUi5JJFNWIqL235sBmjX4+qx9Muvls5ivyNENctx46xQLQ3aTuE7ssaQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/chalk": { + "version": "4.1.2", + "resolved": 
"https://registry.npmjs.org/chalk/-/chalk-4.1.2.tgz", + "integrity": "sha512-oKnbhFyRIXpUuez8iBMmyEa4nbj4IOQyuhc/wy9kY7/WVPcwIO9VA668Pu8RkO7+0G76SLROeyw9CpQ061i4mA==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^4.1.0", + "supports-color": "^7.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/chalk/chalk?sponsor=1" + } + }, + "node_modules/chokidar": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/chokidar/-/chokidar-3.6.0.tgz", + "integrity": "sha512-7VT13fmjotKpGipCW9JEQAusEPE+Ei8nl6/g4FBAmIm0GOOLMua9NDDo/DWp0ZAxCr3cPq5ZpBqmPAQgDda2Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "anymatch": "~3.1.2", + "braces": "~3.0.2", + "glob-parent": "~5.1.2", + "is-binary-path": "~2.1.0", + "is-glob": "~4.0.1", + "normalize-path": "~3.0.0", + "readdirp": "~3.6.0" + }, + "engines": { + "node": ">= 8.10.0" + }, + "funding": { + "url": "https://paulmillr.com/funding/" + }, + "optionalDependencies": { + "fsevents": "~2.3.2" + } + }, + "node_modules/chokidar/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": "sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/codemirror": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/codemirror/-/codemirror-6.0.1.tgz", + "integrity": "sha512-J8j+nZ+CdWmIeFIGXEFbFPtpiYacFMDR8GlHK3IyHQJMCaVRfGx9NT+Hxivv1ckLWPvNdZqndbr/7lVhrf/Svg==", + "license": "MIT", + "dependencies": { + "@codemirror/autocomplete": "^6.0.0", + "@codemirror/commands": "^6.0.0", + "@codemirror/language": "^6.0.0", + "@codemirror/lint": "^6.0.0", + "@codemirror/search": "^6.0.0", + "@codemirror/state": "^6.0.0", + "@codemirror/view": "^6.0.0" + } + }, + "node_modules/color-convert": { + "version": "2.0.1", + "resolved": "https://registry.npmjs.org/color-convert/-/color-convert-2.0.1.tgz", + "integrity": "sha512-RRECPsj7iu/xb5oKYcsFHSppFNnsj/52OVTRKb4zP5onXwVF3zVmmToNcOfGC+CRDpfK/U584fMg38ZHCaElKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "color-name": "~1.1.4" + }, + "engines": { + "node": ">=7.0.0" + } + }, + "node_modules/color-name": { + "version": "1.1.4", + "resolved": "https://registry.npmjs.org/color-name/-/color-name-1.1.4.tgz", + "integrity": "sha512-dOy+3AuW3a2wNbZHIuMZpTcgjGuLU/uBL/ubcZF9OXbDo8ff4O8yVp5Bf0efS8uEoYo5q4Fx7dY9OgQGXgAsQA==", + "dev": true, + "license": "MIT" + }, + "node_modules/combined-stream": { + "version": "1.0.8", + "resolved": "https://registry.npmjs.org/combined-stream/-/combined-stream-1.0.8.tgz", + "integrity": "sha512-FQN4MRfuJeHf7cBbBMJFXhKSDq+2kAArBlmRBvcvFE5BB1HZKXtSFASDhdlz9zOYwxh8lDdnvmMOe/+5cdoEdg==", + "license": "MIT", + "dependencies": { + "delayed-stream": "~1.0.0" + }, + "engines": { + "node": ">= 0.8" + } + }, + "node_modules/computeds": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/computeds/-/computeds-0.0.1.tgz", + "integrity": "sha512-7CEBgcMjVmitjYo5q8JTJVra6X5mQ20uTThdK+0kR7UEaDrAWEQcRiBtWJzga4eRpP6afNwwLsX2SET2JhVB1Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/concat-map": { + "version": "0.0.1", + "resolved": "https://registry.npmjs.org/concat-map/-/concat-map-0.0.1.tgz", + "integrity": "sha512-/Srv4dswyQNBfohGpz9o6Yb3Gz3SrUDqBH5rTuhGR7ahtlbYKnVxw2bCFMRljaA7EXHaXZ8wsHdodFvbkhKmqg==", + "dev": true, + 
"license": "MIT" + }, + "node_modules/crelt": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/crelt/-/crelt-1.0.6.tgz", + "integrity": "sha512-VQ2MBenTq1fWZUH9DJNGti7kKv6EeAuYr3cLwxUWhIu1baTaXh4Ib5W2CqHVqib4/MqbYGJqiL3Zb8GJZr3l4g==", + "license": "MIT" + }, + "node_modules/cross-spawn": { + "version": "7.0.3", + "resolved": "https://registry.npmjs.org/cross-spawn/-/cross-spawn-7.0.3.tgz", + "integrity": "sha512-iRDPJKUPVEND7dHPO8rkbOnPpyDygcDFtWjpeWNCgy8WP2rXcxXL8TskReQl6OrB2G7+UJrags1q15Fudc7G6w==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-key": "^3.1.0", + "shebang-command": "^2.0.0", + "which": "^2.0.1" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/cssesc": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/cssesc/-/cssesc-3.0.0.tgz", + "integrity": "sha512-/Tb/JcjK111nNScGob5MNtsntNM1aCNUDipB/TkwZFhyDrrE47SOx/18wF2bbjgc3ZzCSKW1T5nt5EbFoAz/Vg==", + "dev": true, + "license": "MIT", + "bin": { + "cssesc": "bin/cssesc" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/csstype": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/csstype/-/csstype-3.1.3.tgz", + "integrity": "sha512-M1uQkMl8rQK/szD0LNhtqxIPLpimGm8sOBwU7lLnCpSbTyY3yeU1Vc7l4KT5zT4s/yOxHH5O7tIuuLOCnLADRw==", + "license": "MIT" + }, + "node_modules/de-indent": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/de-indent/-/de-indent-1.0.2.tgz", + "integrity": "sha512-e/1zu3xH5MQryN2zdVaF0OrdNLUbvWxzMbi+iNA6Bky7l1RoP8a2fIbRocyHclXt/arDrrR6lL3TqFD9pMQTsg==", + "dev": true, + "license": "MIT" + }, + "node_modules/debug": { + "version": "4.3.5", + "resolved": "https://registry.npmjs.org/debug/-/debug-4.3.5.tgz", + "integrity": "sha512-pt0bNEmneDIvdL1Xsd9oDQ/wrQRkXDT4AUWlNZNPKvW5x/jyO9VFXkJUP07vQ2upmw5PlaITaPKc31jK13V+jg==", + "dev": true, + "license": "MIT", + "dependencies": { + "ms": "2.1.2" + }, + "engines": { + "node": ">=6.0" + }, + "peerDependenciesMeta": { + "supports-color": { + "optional": true + } + } + }, + "node_modules/deep-is": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/deep-is/-/deep-is-0.1.4.tgz", + "integrity": "sha512-oIPzksmTg4/MriiaYGO+okXDT7ztn/w3Eptv/+gSIdMdKsJo0u4CfYNFJPy+4SKMuCqGw2wxnA+URMg3t8a/bQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/delayed-stream": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/delayed-stream/-/delayed-stream-1.0.0.tgz", + "integrity": "sha512-ZySD7Nf91aLB0RxL4KGrKHBXl7Eds1DAmEdcoVawXnLD7SDhpNgtuII2aAkg7a7QS41jxPSZ17p4VdGnMHk3MQ==", + "license": "MIT", + "engines": { + "node": ">=0.4.0" + } + }, + "node_modules/dir-glob": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/dir-glob/-/dir-glob-3.0.1.tgz", + "integrity": "sha512-WkrWp9GR4KXfKGYzOLmTuGVi1UWFfws377n9cc55/tb6DuqyF6pcQ5AbiHEshaDpY9v6oaSr2XCDidGmMwdzIA==", + "dev": true, + "license": "MIT", + "dependencies": { + "path-type": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/doctrine": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/doctrine/-/doctrine-3.0.0.tgz", + "integrity": "sha512-yS+Q5i3hBf7GBkd4KG8a7eBNNWNGLTaEwwYWUijIYM7zrlYDM0BFXHjjPWlWZ1Rg7UaddZeIDmi9jF3HmqiQ2w==", + "dev": true, + "license": "Apache-2.0", + "dependencies": { + "esutils": "^2.0.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/entities": { + "version": "4.5.0", + "resolved": "https://registry.npmjs.org/entities/-/entities-4.5.0.tgz", + "integrity": 
"sha512-V0hjH4dGPh9Ao5p0MoRY6BVqtwCjhz6vI5LT8AJ55H+4g9/4vbHx1I54fS0XuclLhDHArPQCiMjDxjaL8fPxhw==", + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.12" + }, + "funding": { + "url": "https://github.com/fb55/entities?sponsor=1" + } + }, + "node_modules/esbuild": { + "version": "0.21.5", + "resolved": "https://registry.npmjs.org/esbuild/-/esbuild-0.21.5.tgz", + "integrity": "sha512-mg3OPMV4hXywwpoDxu3Qda5xCKQi+vCTZq8S9J/EpkhB2HzKXq4SNFZE3+NK93JYxc8VMSep+lOUSC/RVKaBqw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "bin": { + "esbuild": "bin/esbuild" + }, + "engines": { + "node": ">=12" + }, + "optionalDependencies": { + "@esbuild/aix-ppc64": "0.21.5", + "@esbuild/android-arm": "0.21.5", + "@esbuild/android-arm64": "0.21.5", + "@esbuild/android-x64": "0.21.5", + "@esbuild/darwin-arm64": "0.21.5", + "@esbuild/darwin-x64": "0.21.5", + "@esbuild/freebsd-arm64": "0.21.5", + "@esbuild/freebsd-x64": "0.21.5", + "@esbuild/linux-arm": "0.21.5", + "@esbuild/linux-arm64": "0.21.5", + "@esbuild/linux-ia32": "0.21.5", + "@esbuild/linux-loong64": "0.21.5", + "@esbuild/linux-mips64el": "0.21.5", + "@esbuild/linux-ppc64": "0.21.5", + "@esbuild/linux-riscv64": "0.21.5", + "@esbuild/linux-s390x": "0.21.5", + "@esbuild/linux-x64": "0.21.5", + "@esbuild/netbsd-x64": "0.21.5", + "@esbuild/openbsd-x64": "0.21.5", + "@esbuild/sunos-x64": "0.21.5", + "@esbuild/win32-arm64": "0.21.5", + "@esbuild/win32-ia32": "0.21.5", + "@esbuild/win32-x64": "0.21.5" + } + }, + "node_modules/escape-string-regexp": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/escape-string-regexp/-/escape-string-regexp-4.0.0.tgz", + "integrity": "sha512-TtpcNJ3XAzx3Gq8sWRzJaVajRs0uVxA2YAkdb1jm2YkPz4G6egUFAyA3n5vtEIZefPk5Wa4UXbKuS5fKkJWdgA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/eslint": { + "version": "8.57.0", + "resolved": "https://registry.npmjs.org/eslint/-/eslint-8.57.0.tgz", + "integrity": "sha512-dZ6+mexnaTIbSBZWgou51U6OmzIhYM2VcNdtiTtI7qPNZm35Akpr0f6vtw3w1Kmn5PYo+tZVfh13WrhpS6oLqQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.2.0", + "@eslint-community/regexpp": "^4.6.1", + "@eslint/eslintrc": "^2.1.4", + "@eslint/js": "8.57.0", + "@humanwhocodes/config-array": "^0.11.14", + "@humanwhocodes/module-importer": "^1.0.1", + "@nodelib/fs.walk": "^1.2.8", + "@ungap/structured-clone": "^1.2.0", + "ajv": "^6.12.4", + "chalk": "^4.0.0", + "cross-spawn": "^7.0.2", + "debug": "^4.3.2", + "doctrine": "^3.0.0", + "escape-string-regexp": "^4.0.0", + "eslint-scope": "^7.2.2", + "eslint-visitor-keys": "^3.4.3", + "espree": "^9.6.1", + "esquery": "^1.4.2", + "esutils": "^2.0.2", + "fast-deep-equal": "^3.1.3", + "file-entry-cache": "^6.0.1", + "find-up": "^5.0.0", + "glob-parent": "^6.0.2", + "globals": "^13.19.0", + "graphemer": "^1.4.0", + "ignore": "^5.2.0", + "imurmurhash": "^0.1.4", + "is-glob": "^4.0.0", + "is-path-inside": "^3.0.3", + "js-yaml": "^4.1.0", + "json-stable-stringify-without-jsonify": "^1.0.1", + "levn": "^0.4.1", + "lodash.merge": "^4.6.2", + "minimatch": "^3.1.2", + "natural-compare": "^1.4.0", + "optionator": "^0.9.3", + "strip-ansi": "^6.0.1", + "text-table": "^0.2.0" + }, + "bin": { + "eslint": "bin/eslint.js" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-config-prettier": { + "version": 
"8.10.0", + "resolved": "https://registry.npmjs.org/eslint-config-prettier/-/eslint-config-prettier-8.10.0.tgz", + "integrity": "sha512-SM8AMJdeQqRYT9O9zguiruQZaN7+z+E4eAP9oiLNGKMtomwaB1E9dcgUD6ZAn/eQAb52USbvezbiljfZUhbJcg==", + "dev": true, + "license": "MIT", + "bin": { + "eslint-config-prettier": "bin/cli.js" + }, + "peerDependencies": { + "eslint": ">=7.0.0" + } + }, + "node_modules/eslint-plugin-prettier": { + "version": "5.2.1", + "resolved": "https://registry.npmjs.org/eslint-plugin-prettier/-/eslint-plugin-prettier-5.2.1.tgz", + "integrity": "sha512-gH3iR3g4JfF+yYPaJYkN7jEl9QbweL/YfkoRlNnuIEHEz1vHVlCmWOS+eGGiRuzHQXdJFCOTxRgvju9b8VUmrw==", + "dev": true, + "license": "MIT", + "dependencies": { + "prettier-linter-helpers": "^1.0.0", + "synckit": "^0.9.1" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint-plugin-prettier" + }, + "peerDependencies": { + "@types/eslint": ">=8.0.0", + "eslint": ">=8.0.0", + "eslint-config-prettier": "*", + "prettier": ">=3.0.0" + }, + "peerDependenciesMeta": { + "@types/eslint": { + "optional": true + }, + "eslint-config-prettier": { + "optional": true + } + } + }, + "node_modules/eslint-plugin-vue": { + "version": "9.27.0", + "resolved": "https://registry.npmjs.org/eslint-plugin-vue/-/eslint-plugin-vue-9.27.0.tgz", + "integrity": "sha512-5Dw3yxEyuBSXTzT5/Ge1X5kIkRTQ3nvBn/VwPwInNiZBSJOO/timWMUaflONnFBzU6NhB68lxnCda7ULV5N7LA==", + "dev": true, + "license": "MIT", + "dependencies": { + "@eslint-community/eslint-utils": "^4.4.0", + "globals": "^13.24.0", + "natural-compare": "^1.4.0", + "nth-check": "^2.1.1", + "postcss-selector-parser": "^6.0.15", + "semver": "^7.6.0", + "vue-eslint-parser": "^9.4.3", + "xml-name-validator": "^4.0.0" + }, + "engines": { + "node": "^14.17.0 || >=16.0.0" + }, + "peerDependencies": { + "eslint": "^6.2.0 || ^7.0.0 || ^8.0.0 || ^9.0.0" + } + }, + "node_modules/eslint-scope": { + "version": "7.2.2", + "resolved": "https://registry.npmjs.org/eslint-scope/-/eslint-scope-7.2.2.tgz", + "integrity": "sha512-dOt21O7lTMhDM+X9mB4GX+DZrZtCUJPL/wlcTqxyrx5IvO0IYtILdtrQGQp+8n5S0gwSVmOf9NQrjMOgfQZlIg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "esrecurse": "^4.3.0", + "estraverse": "^5.2.0" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint-visitor-keys": { + "version": "3.4.3", + "resolved": "https://registry.npmjs.org/eslint-visitor-keys/-/eslint-visitor-keys-3.4.3.tgz", + "integrity": "sha512-wpc+LXeiyiisxPlEkUzU6svyS1frIO3Mgxj1fdy7Pm8Ygzguax2N3Fa/D/ag1WqbOprdI+uY6wMUl8/a2G+iag==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/eslint/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/eslint/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + 
"dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/espree": { + "version": "9.6.1", + "resolved": "https://registry.npmjs.org/espree/-/espree-9.6.1.tgz", + "integrity": "sha512-oruZaFkjorTpF32kDSI5/75ViwGeZginGGy2NoOSg3Q9bnwlnmDm4HLnkl0RE3n+njDXR037aY1+x58Z/zFdwQ==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "acorn": "^8.9.0", + "acorn-jsx": "^5.3.2", + "eslint-visitor-keys": "^3.4.1" + }, + "engines": { + "node": "^12.22.0 || ^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/eslint" + } + }, + "node_modules/esquery": { + "version": "1.6.0", + "resolved": "https://registry.npmjs.org/esquery/-/esquery-1.6.0.tgz", + "integrity": "sha512-ca9pw9fomFcKPvFLXhBKUK90ZvGibiGOvRJNbjljY7s7uq/5YO4BOzcYtJqExdx99rF6aAcnRxHmcUHcz6sQsg==", + "dev": true, + "license": "BSD-3-Clause", + "dependencies": { + "estraverse": "^5.1.0" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/esrecurse": { + "version": "4.3.0", + "resolved": "https://registry.npmjs.org/esrecurse/-/esrecurse-4.3.0.tgz", + "integrity": "sha512-KmfKL3b6G+RXvP8N1vr3Tq1kL/oCFgn2NYXEtqP8/L3pKapUA4G8cFVaoF3SU323CD4XypR/ffioHmkti6/Tag==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "estraverse": "^5.2.0" + }, + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estraverse": { + "version": "5.3.0", + "resolved": "https://registry.npmjs.org/estraverse/-/estraverse-5.3.0.tgz", + "integrity": "sha512-MMdARuVEQziNTeJD8DgMqmhwR11BRQ/cBP+pLtYdSTnf3MIO8fFeiINEbX36ZdNlfU/7A9f3gUw49B3oQsvwBA==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=4.0" + } + }, + "node_modules/estree-walker": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/estree-walker/-/estree-walker-2.0.2.tgz", + "integrity": "sha512-Rfkk/Mp/DL7JVje3u18FxFujQlTNR2q6QfMSMB7AvCBx91NGj/ba3kCfza0f6dVDbw7YlRf/nDrn7pQrCCyQ/w==", + "license": "MIT" + }, + "node_modules/esutils": { + "version": "2.0.3", + "resolved": "https://registry.npmjs.org/esutils/-/esutils-2.0.3.tgz", + "integrity": "sha512-kVscqXk4OCp68SZ0dkgEKVi6/8ij300KBWTJq32P/dYeWTSwK41WyTxalN1eRmA5Z9UU/LX9D7FWSmV9SAYx6g==", + "dev": true, + "license": "BSD-2-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/fast-deep-equal": { + "version": "3.1.3", + "resolved": "https://registry.npmjs.org/fast-deep-equal/-/fast-deep-equal-3.1.3.tgz", + "integrity": "sha512-f3qQ9oQy9j2AhBe/H9VC91wLmKBCCU/gDOnKNAYG5hswO7BLKj09Hc5HYNz9cGI++xlpDCIgDaitVs03ATR84Q==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-diff": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/fast-diff/-/fast-diff-1.3.0.tgz", + "integrity": "sha512-VxPP4NqbUjj6MaAOafWeUn2cXWLcCtljklUtZf0Ind4XQ+QPtmA0b18zZy0jIQx+ExRVCR/ZQpBmik5lXshNsw==", + "dev": true, + "license": "Apache-2.0" + }, + "node_modules/fast-glob": { + "version": "3.3.2", + "resolved": "https://registry.npmjs.org/fast-glob/-/fast-glob-3.3.2.tgz", + "integrity": "sha512-oX2ruAFQwf/Orj8m737Y5adxDQO0LAB7/S5MnxCdTNDd4p6BsyIVsv9JQsATbTSq8KHRpLwIHbVlUNatxd+1Ow==", + "dev": true, + "license": "MIT", + "dependencies": { + "@nodelib/fs.stat": "^2.0.2", + "@nodelib/fs.walk": "^1.2.3", + "glob-parent": "^5.1.2", + "merge2": "^1.3.0", + "micromatch": "^4.0.4" + }, + "engines": { + "node": ">=8.6.0" + } + }, + "node_modules/fast-glob/node_modules/glob-parent": { + "version": "5.1.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-5.1.2.tgz", + "integrity": 
"sha512-AOIgSQCepiJYwP3ARnGx+5VnTu2HBYdzbGP45eLw1vr3zB3vZLeyed1sC9hnbcOc9/SrMyM5RPQrkGz4aS9Zow==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.1" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fast-json-stable-stringify": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/fast-json-stable-stringify/-/fast-json-stable-stringify-2.1.0.tgz", + "integrity": "sha512-lhd/wF+Lk98HZoTCtlVraHtfh5XYijIjalXck7saUtuanSDyLMxnHhSXEDJqHxD7msR8D0uCmqlkwjCV8xvwHw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fast-levenshtein": { + "version": "2.0.6", + "resolved": "https://registry.npmjs.org/fast-levenshtein/-/fast-levenshtein-2.0.6.tgz", + "integrity": "sha512-DCXu6Ifhqcks7TZKY3Hxp3y6qphY5SJZmrWMDrKcERSOXWQdMhU9Ig/PYrzyw/ul9jOIyh0N4M0tbC5hodg8dw==", + "dev": true, + "license": "MIT" + }, + "node_modules/fastq": { + "version": "1.17.1", + "resolved": "https://registry.npmjs.org/fastq/-/fastq-1.17.1.tgz", + "integrity": "sha512-sRVD3lWVIXWg6By68ZN7vho9a1pQcN/WBFaAAsDDFzlJjvoGx0P8z7V1t72grFJfJhu3YPZBuu25f7Kaw2jN1w==", + "dev": true, + "license": "ISC", + "dependencies": { + "reusify": "^1.0.4" + } + }, + "node_modules/file-entry-cache": { + "version": "6.0.1", + "resolved": "https://registry.npmjs.org/file-entry-cache/-/file-entry-cache-6.0.1.tgz", + "integrity": "sha512-7Gps/XWymbLk2QLYK4NzpMOrYjMhdIxXuIvy2QBsLE6ljuodKvdkWs/cpyJJ3CVIVpH0Oi1Hvg1ovbMzLdFBBg==", + "dev": true, + "license": "MIT", + "dependencies": { + "flat-cache": "^3.0.4" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/fill-range": { + "version": "7.1.1", + "resolved": "https://registry.npmjs.org/fill-range/-/fill-range-7.1.1.tgz", + "integrity": "sha512-YsGpe3WHLK8ZYi4tWDg2Jy3ebRz2rXowDxnld4bkQB00cc/1Zw9AWnC0i9ztDJitivtQvaI9KaLyKrc+hBW0yg==", + "dev": true, + "license": "MIT", + "dependencies": { + "to-regex-range": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/find-up": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/find-up/-/find-up-5.0.0.tgz", + "integrity": "sha512-78/PXT1wlLLDgTzDs7sjq9hzz0vXD+zn+7wypEe4fXQxCmdmqfGsEPQxmiCSQI3ajFV91bVSsvNtrJRiW6nGng==", + "dev": true, + "license": "MIT", + "dependencies": { + "locate-path": "^6.0.0", + "path-exists": "^4.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/flat-cache": { + "version": "3.2.0", + "resolved": "https://registry.npmjs.org/flat-cache/-/flat-cache-3.2.0.tgz", + "integrity": "sha512-CYcENa+FtcUKLmhhqyctpclsq7QF38pKjZHsGNiSQF5r4FtoKDWabFDl3hzaEQMvT1LHEysw5twgLvpYYb4vbw==", + "dev": true, + "license": "MIT", + "dependencies": { + "flatted": "^3.2.9", + "keyv": "^4.5.3", + "rimraf": "^3.0.2" + }, + "engines": { + "node": "^10.12.0 || >=12.0.0" + } + }, + "node_modules/flatted": { + "version": "3.3.1", + "resolved": "https://registry.npmjs.org/flatted/-/flatted-3.3.1.tgz", + "integrity": "sha512-X8cqMLLie7KsNUDSdzeN8FYK9rEt4Dt67OsG/DNGnYTSDBG4uFAJFBnUeiV+zCVAvwFy56IjM9sH51jVaEhNxw==", + "dev": true, + "license": "ISC" + }, + "node_modules/follow-redirects": { + "version": "1.15.6", + "resolved": "https://registry.npmjs.org/follow-redirects/-/follow-redirects-1.15.6.tgz", + "integrity": "sha512-wWN62YITEaOpSK584EZXJafH1AGpO8RVgElfkuXbTOrPX4fIfOyEpW/CsiNd8JdYrAoOvafRTOEnvsO++qCqFA==", + "funding": [ + { + "type": "individual", + "url": "https://github.com/sponsors/RubenVerborgh" + } + ], + "license": "MIT", + "engines": { + "node": ">=4.0" + }, + 
"peerDependenciesMeta": { + "debug": { + "optional": true + } + } + }, + "node_modules/form-data": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/form-data/-/form-data-4.0.0.tgz", + "integrity": "sha512-ETEklSGi5t0QMZuiXoA/Q6vcnxcLQP5vdugSpuAyi6SVGi2clPPp+xgEhuMaHC+zGgn31Kd235W35f7Hykkaww==", + "license": "MIT", + "dependencies": { + "asynckit": "^0.4.0", + "combined-stream": "^1.0.8", + "mime-types": "^2.1.12" + }, + "engines": { + "node": ">= 6" + } + }, + "node_modules/fs.realpath": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/fs.realpath/-/fs.realpath-1.0.0.tgz", + "integrity": "sha512-OO0pH2lK6a0hZnAdau5ItzHPI6pUlvI7jMVnxUQRtw4owF2wk8lOSabtGDCTP4Ggrg2MbGnWO9X8K1t4+fGMDw==", + "dev": true, + "license": "ISC" + }, + "node_modules/fsevents": { + "version": "2.3.3", + "resolved": "https://registry.npmjs.org/fsevents/-/fsevents-2.3.3.tgz", + "integrity": "sha512-5xoDfX+fL7faATnagmWPpbFtwh/R77WmMMqqHGS65C3vvB0YHrgF+B1YmZ3441tMj5n63k0212XNoJwzlhffQw==", + "dev": true, + "hasInstallScript": true, + "license": "MIT", + "optional": true, + "os": [ + "darwin" + ], + "engines": { + "node": "^8.16.0 || ^10.6.0 || >=11.0.0" + } + }, + "node_modules/glob": { + "version": "7.2.3", + "resolved": "https://registry.npmjs.org/glob/-/glob-7.2.3.tgz", + "integrity": "sha512-nFR0zLpU2YCaRxwoCJvL6UvCH2JFyFVIvwTLsIf21AuHlMskA1hhTdk+LlYJtOlYt9v6dvszD2BGRqBL+iQK9Q==", + "deprecated": "Glob versions prior to v9 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "fs.realpath": "^1.0.0", + "inflight": "^1.0.4", + "inherits": "2", + "minimatch": "^3.1.1", + "once": "^1.3.0", + "path-is-absolute": "^1.0.0" + }, + "engines": { + "node": "*" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/glob-parent": { + "version": "6.0.2", + "resolved": "https://registry.npmjs.org/glob-parent/-/glob-parent-6.0.2.tgz", + "integrity": "sha512-XxwI8EOhVQgWp6iDL+3b0r86f4d6AX6zSU55HfB4ydCEuXLXc5FcYeOu+nnGftS4TEju/11rt4KJPTMgbfmv4A==", + "dev": true, + "license": "ISC", + "dependencies": { + "is-glob": "^4.0.3" + }, + "engines": { + "node": ">=10.13.0" + } + }, + "node_modules/glob/node_modules/brace-expansion": { + "version": "1.1.11", + "resolved": "https://registry.npmjs.org/brace-expansion/-/brace-expansion-1.1.11.tgz", + "integrity": "sha512-iCuPHDFgrHX7H2vEI/5xpz07zSHB00TpugqhmYtVmMO6518mCuRMoOYFldEBl0g187ufozdaHgWKcYFb61qGiA==", + "dev": true, + "license": "MIT", + "dependencies": { + "balanced-match": "^1.0.0", + "concat-map": "0.0.1" + } + }, + "node_modules/glob/node_modules/minimatch": { + "version": "3.1.2", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-3.1.2.tgz", + "integrity": "sha512-J7p63hRiAjw1NDEww1W7i37+ByIrOWO5XQQAzZ3VOcL0PNybwpfmV/N05zFAzwQ9USyEcX6t3UO+K5aqBQOIHw==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^1.1.7" + }, + "engines": { + "node": "*" + } + }, + "node_modules/globals": { + "version": "13.24.0", + "resolved": "https://registry.npmjs.org/globals/-/globals-13.24.0.tgz", + "integrity": "sha512-AhO5QUcj8llrbG09iWhPU2B204J1xnPeL8kQmVorSsy+Sjj1sk8gIyh6cUocGmH4L0UuhAJy+hJMRA4mgA4mFQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "type-fest": "^0.20.2" + }, + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/globby": { + "version": "11.1.0", + "resolved": "https://registry.npmjs.org/globby/-/globby-11.1.0.tgz", + "integrity": 
"sha512-jhIXaOzy1sb8IyocaruWSn1TjmnBVs8Ayhcy83rmxNJ8q2uWKCAj3CnJY+KpGSXCueAPc0i05kVvVKtP1t9S3g==", + "dev": true, + "license": "MIT", + "dependencies": { + "array-union": "^2.1.0", + "dir-glob": "^3.0.1", + "fast-glob": "^3.2.9", + "ignore": "^5.2.0", + "merge2": "^1.4.1", + "slash": "^3.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/graphemer": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/graphemer/-/graphemer-1.4.0.tgz", + "integrity": "sha512-EtKwoO6kxCL9WO5xipiHTZlSzBm7WLT627TqC/uVRd0HKmq8NXyebnNYxDoBi7wt8eTWrUrKXCOVaFq9x1kgag==", + "dev": true, + "license": "MIT" + }, + "node_modules/has-flag": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/has-flag/-/has-flag-4.0.0.tgz", + "integrity": "sha512-EykJT/Q1KjTWctppgIAgfSO0tKVuZUjhgMr17kqTumMl6Afv3EISleU7qZUzoXDFTAHTDC4NOoG/ZxU3EvlMPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/he": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/he/-/he-1.2.0.tgz", + "integrity": "sha512-F/1DnUGPopORZi0ni+CvrCgHQ5FyEAHRLSApuYWMmrbSwoN2Mn/7k+Gl38gJnR7yyDZk6WLXwiGod1JOWNDKGw==", + "dev": true, + "license": "MIT", + "bin": { + "he": "bin/he" + } + }, + "node_modules/ignore": { + "version": "5.3.1", + "resolved": "https://registry.npmjs.org/ignore/-/ignore-5.3.1.tgz", + "integrity": "sha512-5Fytz/IraMjqpwfd34ke28PTVMjZjJG2MPn5t7OE4eUCUNf8BAa7b5WUS9/Qvr6mwOQS7Mk6vdsMno5he+T8Xw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 4" + } + }, + "node_modules/immutable": { + "version": "4.3.6", + "resolved": "https://registry.npmjs.org/immutable/-/immutable-4.3.6.tgz", + "integrity": "sha512-Ju0+lEMyzMVZarkTn/gqRpdqd5dOPaz1mCZ0SH3JV6iFw81PldE/PEB1hWVEA288HPt4WXW8O7AWxB10M+03QQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/import-fresh": { + "version": "3.3.0", + "resolved": "https://registry.npmjs.org/import-fresh/-/import-fresh-3.3.0.tgz", + "integrity": "sha512-veYYhQa+D1QBKznvhUHxb8faxlrwUnxseDAbAp457E0wLNio2bOSKnjYDhMj+YiAq61xrMGhQk9iXVk5FzgQMw==", + "dev": true, + "license": "MIT", + "dependencies": { + "parent-module": "^1.0.0", + "resolve-from": "^4.0.0" + }, + "engines": { + "node": ">=6" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/imurmurhash": { + "version": "0.1.4", + "resolved": "https://registry.npmjs.org/imurmurhash/-/imurmurhash-0.1.4.tgz", + "integrity": "sha512-JmXMZ6wuvDmLiHEml9ykzqO6lwFbof0GG4IkcGaENdCRDDmMVnny7s5HsIgHCbaq0w2MyPhDqkhTUgS2LU2PHA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.8.19" + } + }, + "node_modules/inflight": { + "version": "1.0.6", + "resolved": "https://registry.npmjs.org/inflight/-/inflight-1.0.6.tgz", + "integrity": "sha512-k92I/b08q4wvFscXCLvqfsHCrjrF7yiXsQuIVvVE7N82W3+aqpzuUdBbfhWcy/FZR3/4IgflMgKLOsvPDrGCJA==", + "deprecated": "This module is not supported, and leaks memory. Do not use it. 
Check out lru-cache if you want a good and tested way to coalesce async requests by a key value, which is much more comprehensive and powerful.", + "dev": true, + "license": "ISC", + "dependencies": { + "once": "^1.3.0", + "wrappy": "1" + } + }, + "node_modules/inherits": { + "version": "2.0.4", + "resolved": "https://registry.npmjs.org/inherits/-/inherits-2.0.4.tgz", + "integrity": "sha512-k/vGaX4/Yla3WzyMCvTQOXYeIHvqOKtnqBduzTHpzpQZzAskKMhZ2K+EnBiSM9zGSoIFeMpXKxa4dYeZIQqewQ==", + "dev": true, + "license": "ISC" + }, + "node_modules/is-binary-path": { + "version": "2.1.0", + "resolved": "https://registry.npmjs.org/is-binary-path/-/is-binary-path-2.1.0.tgz", + "integrity": "sha512-ZMERYes6pDydyuGidse7OsHxtbI7WVeUEozgR/g7rd0xUimYNlvZRE/K2MgZTjWy725IfelLeVcEM97mmtRGXw==", + "dev": true, + "license": "MIT", + "dependencies": { + "binary-extensions": "^2.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/is-extglob": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/is-extglob/-/is-extglob-2.1.1.tgz", + "integrity": "sha512-SbKbANkN603Vi4jEZv49LeVJMn4yGwsbzZworEoyEiutsN3nJYdbO36zfhGJ6QEDpOZIFkDtnq5JRxmvl3jsoQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-glob": { + "version": "4.0.3", + "resolved": "https://registry.npmjs.org/is-glob/-/is-glob-4.0.3.tgz", + "integrity": "sha512-xelSayHH36ZgE7ZWhli7pW34hNbNl8Ojv5KVmkJD4hBdD3th8Tfk9vYasLM+mXWOZhFkgZfxhLSnrwRr4elSSg==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-extglob": "^2.1.1" + }, + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/is-number": { + "version": "7.0.0", + "resolved": "https://registry.npmjs.org/is-number/-/is-number-7.0.0.tgz", + "integrity": "sha512-41Cifkg6e8TylSpdtTpeLVMqvSBEVzTttHvERD741+pnZ8ANv0004MRL43QKPDlK9cGvNp6NZWZUBlbGXYxxng==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.12.0" + } + }, + "node_modules/is-path-inside": { + "version": "3.0.3", + "resolved": "https://registry.npmjs.org/is-path-inside/-/is-path-inside-3.0.3.tgz", + "integrity": "sha512-Fd4gABb+ycGAmKou8eMftCupSir5lRxqf4aD/vd0cD2qc4HL07OjCeuHMr8Ro4CoMaeCKDB0/ECBOVWjTwUvPQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/isexe": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/isexe/-/isexe-2.0.0.tgz", + "integrity": "sha512-RHxMLp9lnKHGHRng9QFhRCMbYAcVpn69smSGcq3f36xjgVVWThj4qqLbTLlq7Ssj8B+fIQ1EuCEGI2lKsyQeIw==", + "dev": true, + "license": "ISC" + }, + "node_modules/js-yaml": { + "version": "4.1.0", + "resolved": "https://registry.npmjs.org/js-yaml/-/js-yaml-4.1.0.tgz", + "integrity": "sha512-wpxZs9NoxZaJESJGIZTyDEaYpl0FKSA+FB9aJiyemKhMwkxQg63h4T1KJgUGHpTqPDNRcmmYLugrRjJlBtWvRA==", + "license": "MIT", + "dependencies": { + "argparse": "^2.0.1" + }, + "bin": { + "js-yaml": "bin/js-yaml.js" + } + }, + "node_modules/json-buffer": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/json-buffer/-/json-buffer-3.0.1.tgz", + "integrity": "sha512-4bV5BfR2mqfQTJm+V5tPPdf+ZpuhiIvTuAB5g8kcrXOZpTT/QwwVRWBywX1ozr6lEuPdbHxwaJlm9G6mI2sfSQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-parse-even-better-errors": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/json-parse-even-better-errors/-/json-parse-even-better-errors-3.0.2.tgz", + "integrity": "sha512-fi0NG4bPjCHunUJffmLd0gxssIgkNmArMvis4iNah6Owg1MCJjWhEcDLmsK6iGkJq3tHwbDkTlce70/tmXN4cQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": "^14.17.0 || ^16.13.0 || 
>=18.0.0" + } + }, + "node_modules/json-schema-traverse": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/json-schema-traverse/-/json-schema-traverse-0.4.1.tgz", + "integrity": "sha512-xbbCH5dCYU5T8LcEhhuh7HJ88HXuW3qsI3Y0zOZFKfZEHcpWiHU/Jxzk629Brsab/mMiHQti9wMP+845RPe3Vg==", + "dev": true, + "license": "MIT" + }, + "node_modules/json-stable-stringify-without-jsonify": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/json-stable-stringify-without-jsonify/-/json-stable-stringify-without-jsonify-1.0.1.tgz", + "integrity": "sha512-Bdboy+l7tA3OGW6FjyFHWkP5LuByj1Tk33Ljyq0axyzdk9//JSi2u3fP1QSmd1KNwq6VOKYGlAu87CisVir6Pw==", + "dev": true, + "license": "MIT" + }, + "node_modules/keyv": { + "version": "4.5.4", + "resolved": "https://registry.npmjs.org/keyv/-/keyv-4.5.4.tgz", + "integrity": "sha512-oxVHkHR/EJf2CNXnWxRLW6mg7JyCCUcG0DtEGmL2ctUo1PNTin1PUil+r/+4r5MpVgC/fn1kjsx7mjSujKqIpw==", + "dev": true, + "license": "MIT", + "dependencies": { + "json-buffer": "3.0.1" + } + }, + "node_modules/levn": { + "version": "0.4.1", + "resolved": "https://registry.npmjs.org/levn/-/levn-0.4.1.tgz", + "integrity": "sha512-+bT2uH4E5LGE7h/n3evcS/sQlJXCpIp6ym8OWJ5eV6+67Dsql/LaaT7qJBAt2rzfoa/5QBGBhxDix1dMt2kQKQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1", + "type-check": "~0.4.0" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/locate-path": { + "version": "6.0.0", + "resolved": "https://registry.npmjs.org/locate-path/-/locate-path-6.0.0.tgz", + "integrity": "sha512-iPZK6eYjbxRu3uB4/WZ3EsEIMJFMqAoopl3R+zuq0UjcAm/MO6KCweDgPfP3elTztoKP3KtnVHxTn2NHBSDVUw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-locate": "^5.0.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/lodash": { + "version": "4.17.21", + "resolved": "https://registry.npmjs.org/lodash/-/lodash-4.17.21.tgz", + "integrity": "sha512-v2kDEe57lecTulaDIuNTPy3Ry4gLGJ6Z1O3vE1krgXZNrsQ+LFTGHVxVjcXPs17LhbZVGedAJv8XZ1tvj5FvSg==", + "dev": true, + "license": "MIT" + }, + "node_modules/lodash.merge": { + "version": "4.6.2", + "resolved": "https://registry.npmjs.org/lodash.merge/-/lodash.merge-4.6.2.tgz", + "integrity": "sha512-0KpjqXRVvrYyCsX1swR/XTK0va6VQkQM6MNo7PqW77ByjAhoARA8EfrP1N4+KlKj8YS0ZUCtRT/YUuhyYDujIQ==", + "dev": true, + "license": "MIT" + }, + "node_modules/magic-string": { + "version": "0.30.10", + "resolved": "https://registry.npmjs.org/magic-string/-/magic-string-0.30.10.tgz", + "integrity": "sha512-iIRwTIf0QKV3UAnYK4PU8uiEc4SRh5jX0mwpIwETPpHdhVM4f53RSwS/vXvN1JhGX+Cs7B8qIq3d6AH49O5fAQ==", + "license": "MIT", + "dependencies": { + "@jridgewell/sourcemap-codec": "^1.4.15" + } + }, + "node_modules/memorystream": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/memorystream/-/memorystream-0.3.1.tgz", + "integrity": "sha512-S3UwM3yj5mtUSEfP41UZmt/0SCoVYUcU1rkXv+BQ5Ig8ndL4sPoJNBUJERafdPb5jjHJGuMgytgKvKIf58XNBw==", + "dev": true, + "engines": { + "node": ">= 0.10.0" + } + }, + "node_modules/merge2": { + "version": "1.4.1", + "resolved": "https://registry.npmjs.org/merge2/-/merge2-1.4.1.tgz", + "integrity": "sha512-8q7VEgMJW4J8tcfVPy8g09NcQwZdbwFEqhe/WZkoIzjn/3TGDwtOCYtXGxA3O8tPzpczCCDgv+P2P5y00ZJOOg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 8" + } + }, + "node_modules/micromatch": { + "version": "4.0.8", + "resolved": "https://registry.npmjs.org/micromatch/-/micromatch-4.0.8.tgz", + "integrity": 
"sha512-PXwfBhYu0hBCPw8Dn0E+WDYb7af3dSLVWKi3HGv84IdF4TyFoC0ysxFd0Goxw7nSv4T/PzEJQxsYsEiFCKo2BA==", + "dev": true, + "license": "MIT", + "dependencies": { + "braces": "^3.0.3", + "picomatch": "^2.3.1" + }, + "engines": { + "node": ">=8.6" + } + }, + "node_modules/mime-db": { + "version": "1.52.0", + "resolved": "https://registry.npmjs.org/mime-db/-/mime-db-1.52.0.tgz", + "integrity": "sha512-sPU4uV7dYlvtWJxwwxHD0PuihVNiE7TyAbQ5SWxDCB9mUYvOgroQOwYQQOKPJ8CIbE+1ETVlOoK1UC2nU3gYvg==", + "license": "MIT", + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/mime-types": { + "version": "2.1.35", + "resolved": "https://registry.npmjs.org/mime-types/-/mime-types-2.1.35.tgz", + "integrity": "sha512-ZDY+bPm5zTTF+YpCrAU9nK0UgICYPT0QtT1NZWFv4s++TNkcgVaT0g6+4R2uI4MjQjzysHB1zxuWL50hzaeXiw==", + "license": "MIT", + "dependencies": { + "mime-db": "1.52.0" + }, + "engines": { + "node": ">= 0.6" + } + }, + "node_modules/minimatch": { + "version": "9.0.3", + "resolved": "https://registry.npmjs.org/minimatch/-/minimatch-9.0.3.tgz", + "integrity": "sha512-RHiac9mvaRw0x3AYRgDC1CxAP7HTcNrrECeA8YYJeWnpo+2Q5CegtZjaotWTWxDG3UeGA1coE05iH1mPjT/2mg==", + "dev": true, + "license": "ISC", + "dependencies": { + "brace-expansion": "^2.0.1" + }, + "engines": { + "node": ">=16 || 14 >=14.17" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/ms": { + "version": "2.1.2", + "resolved": "https://registry.npmjs.org/ms/-/ms-2.1.2.tgz", + "integrity": "sha512-sGkPx+VjMtmA6MX27oA4FBFELFCZZ4S4XqeGOXCv68tT+jb3vk/RyaKWP0PTKyWtmLSM0b+adUTEvbs1PEaH2w==", + "dev": true, + "license": "MIT" + }, + "node_modules/muggle-string": { + "version": "0.3.1", + "resolved": "https://registry.npmjs.org/muggle-string/-/muggle-string-0.3.1.tgz", + "integrity": "sha512-ckmWDJjphvd/FvZawgygcUeQCxzvohjFO5RxTjj4eq8kw359gFF3E1brjfI+viLMxss5JrHTDRHZvu2/tuy0Qg==", + "dev": true, + "license": "MIT" + }, + "node_modules/nanoid": { + "version": "3.3.7", + "resolved": "https://registry.npmjs.org/nanoid/-/nanoid-3.3.7.tgz", + "integrity": "sha512-eSRppjcPIatRIMC1U6UngP8XFcz8MQWGQdt1MTBQ7NaAmvXDfvNxbvWV3x2y6CdEUciCSsDHDQZbhYaB8QEo2g==", + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "bin": { + "nanoid": "bin/nanoid.cjs" + }, + "engines": { + "node": "^10 || ^12 || ^13.7 || ^14 || >=15.0.1" + } + }, + "node_modules/natural-compare": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/natural-compare/-/natural-compare-1.4.0.tgz", + "integrity": "sha512-OWND8ei3VtNC9h7V60qff3SVobHr996CTwgxubgyQYEpg290h9J0buyECNNJexkFm5sOajh5G116RYA1c8ZMSw==", + "dev": true, + "license": "MIT" + }, + "node_modules/normalize-path": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/normalize-path/-/normalize-path-3.0.0.tgz", + "integrity": "sha512-6eZs5Ls3WtCisHWp9S2GUy8dqkpGi4BVSz3GaqiE6ezub0512ESztXUwUB6C6IKbQkY2Pnb/mD4WYojCRwcwLA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/npm-normalize-package-bin": { + "version": "3.0.1", + "resolved": "https://registry.npmjs.org/npm-normalize-package-bin/-/npm-normalize-package-bin-3.0.1.tgz", + "integrity": "sha512-dMxCf+zZ+3zeQZXKxmyuCKlIDPGuv8EF940xbkC4kQVDTtqoh6rJFO+JTKSA6/Rwi0getWmtuy4Itup0AMcaDQ==", + "dev": true, + "license": "ISC", + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/npm-run-all2": { + "version": "6.2.2", + "resolved": "https://registry.npmjs.org/npm-run-all2/-/npm-run-all2-6.2.2.tgz", + 
"integrity": "sha512-Q+alQAGIW7ZhKcxLt8GcSi3h3ryheD6xnmXahkMRVM5LYmajcUrSITm8h+OPC9RYWMV2GR0Q1ntTUCfxaNoOJw==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-styles": "^6.2.1", + "cross-spawn": "^7.0.3", + "memorystream": "^0.3.1", + "minimatch": "^9.0.0", + "pidtree": "^0.6.0", + "read-package-json-fast": "^3.0.2", + "shell-quote": "^1.7.3" + }, + "bin": { + "npm-run-all": "bin/npm-run-all/index.js", + "npm-run-all2": "bin/npm-run-all/index.js", + "run-p": "bin/run-p/index.js", + "run-s": "bin/run-s/index.js" + }, + "engines": { + "node": "^14.18.0 || ^16.13.0 || >=18.0.0", + "npm": ">= 8" + } + }, + "node_modules/npm-run-all2/node_modules/ansi-styles": { + "version": "6.2.1", + "resolved": "https://registry.npmjs.org/ansi-styles/-/ansi-styles-6.2.1.tgz", + "integrity": "sha512-bN798gFfQX+viw3R7yrGWRqnrN2oRkEkUjjl4JNn4E8GxxbjtG3FbrEIIY3l8/hrwUwIeCZvi4QuOTP4MErVug==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/chalk/ansi-styles?sponsor=1" + } + }, + "node_modules/nth-check": { + "version": "2.1.1", + "resolved": "https://registry.npmjs.org/nth-check/-/nth-check-2.1.1.tgz", + "integrity": "sha512-lqjrjmaOoAnWfMmBPL+XNnynZh2+swxiX3WUE0s4yEHI6m+AwrK2UZOimIRl3X/4QctVqS8AiZjFqyOGrMXb/w==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "boolbase": "^1.0.0" + }, + "funding": { + "url": "https://github.com/fb55/nth-check?sponsor=1" + } + }, + "node_modules/once": { + "version": "1.4.0", + "resolved": "https://registry.npmjs.org/once/-/once-1.4.0.tgz", + "integrity": "sha512-lNaJgI+2Q5URQBkccEKHTQOPaXdUxnZZElQTZY0MFUAuaEqe1E+Nyvgdz/aIyNi6Z9MzO5dv1H8n58/GELp3+w==", + "dev": true, + "license": "ISC", + "dependencies": { + "wrappy": "1" + } + }, + "node_modules/optionator": { + "version": "0.9.4", + "resolved": "https://registry.npmjs.org/optionator/-/optionator-0.9.4.tgz", + "integrity": "sha512-6IpQ7mKUxRcZNLIObR0hz7lxsapSSIYNZJwXPGeF0mTVqGKFIXj1DQcMoT22S3ROcLyY/rz0PWaWZ9ayWmad9g==", + "dev": true, + "license": "MIT", + "dependencies": { + "deep-is": "^0.1.3", + "fast-levenshtein": "^2.0.6", + "levn": "^0.4.1", + "prelude-ls": "^1.2.1", + "type-check": "^0.4.0", + "word-wrap": "^1.2.5" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/p-limit": { + "version": "3.1.0", + "resolved": "https://registry.npmjs.org/p-limit/-/p-limit-3.1.0.tgz", + "integrity": "sha512-TYOanM3wGwNGsZN2cVTYPArw454xnXj5qmWF1bEoAc4+cU/ol7GVh7odevjp1FNHduHc3KZMcFduxU5Xc6uJRQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "yocto-queue": "^0.1.0" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/p-locate": { + "version": "5.0.0", + "resolved": "https://registry.npmjs.org/p-locate/-/p-locate-5.0.0.tgz", + "integrity": "sha512-LaNjtRWUBY++zB5nE/NwcaoMylSPk+S+ZHNB1TzdbMJMny6dynpAGt7X/tl/QYq3TIeE6nxHppbo2LGymrG5Pw==", + "dev": true, + "license": "MIT", + "dependencies": { + "p-limit": "^3.0.2" + }, + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/parent-module": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/parent-module/-/parent-module-1.0.1.tgz", + "integrity": "sha512-GQ2EWRpQV8/o+Aw8YqtfZZPfNRWZYkbidE9k5rpl/hC3vtHHBfGm2Ifi6qWV+coDGkrUKZAxE3Lot5kcsRlh+g==", + "dev": true, + "license": "MIT", + "dependencies": { + "callsites": "^3.0.0" + }, + "engines": { + "node": ">=6" + } + }, + "node_modules/path-browserify": 
{ + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-browserify/-/path-browserify-1.0.1.tgz", + "integrity": "sha512-b7uo2UCUOYZcnF/3ID0lulOJi/bafxa1xPe7ZPsammBSpjSWQkjNxlt635YGS2MiR9GjvuXCtz2emr3jbsz98g==", + "dev": true, + "license": "MIT" + }, + "node_modules/path-exists": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-exists/-/path-exists-4.0.0.tgz", + "integrity": "sha512-ak9Qy5Q7jYb2Wwcey5Fpvg2KoAc/ZIhLSLOSBmRmygPsGwkVVt0fZa0qrtMz+m6tJTAHfZQ8FnmB4MG4LWy7/w==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-is-absolute": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/path-is-absolute/-/path-is-absolute-1.0.1.tgz", + "integrity": "sha512-AVbw3UJ2e9bq64vSaS9Am0fje1Pa8pbGqTTsmXfaIiMpnr5DlDhfJOuLj9Sf95ZPVDAUerDfEk88MPmPe7UCQg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/path-key": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/path-key/-/path-key-3.1.1.tgz", + "integrity": "sha512-ojmeN0qd+y0jszEtoY48r0Peq5dwMEkIlCOu6Q5f41lfkswXuKtYrhgoTpLnyIcHm24Uhqx+5Tqm2InSwLhE6Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/path-type": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/path-type/-/path-type-4.0.0.tgz", + "integrity": "sha512-gDKb8aZMDeD/tZWs9P6+q0J9Mwkdl6xMV8TjnGP3qJVJ06bdMgkbBlLU8IdfOsIsFz2BW1rNVT3XuNEl8zPAvw==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/picocolors": { + "version": "1.0.1", + "resolved": "https://registry.npmjs.org/picocolors/-/picocolors-1.0.1.tgz", + "integrity": "sha512-anP1Z8qwhkbmu7MFP5iTt+wQKXgwzf7zTyGlcdzabySa9vd0Xt392U0rVmz9poOaBj0uHJKyyo9/upk0HrEQew==", + "license": "ISC" + }, + "node_modules/picomatch": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/picomatch/-/picomatch-2.3.1.tgz", + "integrity": "sha512-JU3teHTNjmE2VCGFzuY8EXzCDVwEqB2a8fsIvwaStHhAWJEeVd1o1QD80CU6+ZdEXXSLbSsuLwJjkCBWqRQUVA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8.6" + }, + "funding": { + "url": "https://github.com/sponsors/jonschlinkert" + } + }, + "node_modules/pidtree": { + "version": "0.6.0", + "resolved": "https://registry.npmjs.org/pidtree/-/pidtree-0.6.0.tgz", + "integrity": "sha512-eG2dWTVw5bzqGRztnHExczNxt5VGsE6OwTeCG3fdUf9KBsZzO3R5OIIIzWR+iZA0NtZ+RDVdaoE2dK1cn6jH4g==", + "dev": true, + "license": "MIT", + "bin": { + "pidtree": "bin/pidtree.js" + }, + "engines": { + "node": ">=0.10" + } + }, + "node_modules/pinia": { + "version": "2.1.7", + "resolved": "https://registry.npmjs.org/pinia/-/pinia-2.1.7.tgz", + "integrity": "sha512-+C2AHFtcFqjPih0zpYuvof37SFxMQ7OEG2zV9jRI12i9BOy3YQVAHwdKtyyc8pDcDyIc33WCIsZaCFWU7WWxGQ==", + "license": "MIT", + "dependencies": { + "@vue/devtools-api": "^6.5.0", + "vue-demi": ">=0.14.5" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "@vue/composition-api": "^1.4.0", + "typescript": ">=4.4.4", + "vue": "^2.6.14 || ^3.3.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + }, + "typescript": { + "optional": true + } + } + }, + "node_modules/pinia/node_modules/vue-demi": { + "version": "0.14.8", + "resolved": "https://registry.npmjs.org/vue-demi/-/vue-demi-0.14.8.tgz", + "integrity": "sha512-Uuqnk9YE9SsWeReYqK2alDI5YzciATE0r2SkA6iMAtuXvNTMNACJLJEXNXaEy94ECuBe4Sk6RzRU80kjdbIo1Q==", + "hasInstallScript": true, + "license": "MIT", + "bin": { + 
"vue-demi-fix": "bin/vue-demi-fix.js", + "vue-demi-switch": "bin/vue-demi-switch.js" + }, + "engines": { + "node": ">=12" + }, + "funding": { + "url": "https://github.com/sponsors/antfu" + }, + "peerDependencies": { + "@vue/composition-api": "^1.0.0-rc.1", + "vue": "^3.0.0-0 || ^2.6.0" + }, + "peerDependenciesMeta": { + "@vue/composition-api": { + "optional": true + } + } + }, + "node_modules/postcss": { + "version": "8.4.39", + "resolved": "https://registry.npmjs.org/postcss/-/postcss-8.4.39.tgz", + "integrity": "sha512-0vzE+lAiG7hZl1/9I8yzKLx3aR9Xbof3fBHKunvMfOCYAtMhrsnccJY2iTURb9EZd5+pLuiNV9/c/GZJOHsgIw==", + "funding": [ + { + "type": "opencollective", + "url": "https://opencollective.com/postcss/" + }, + { + "type": "tidelift", + "url": "https://tidelift.com/funding/github/npm/postcss" + }, + { + "type": "github", + "url": "https://github.com/sponsors/ai" + } + ], + "license": "MIT", + "dependencies": { + "nanoid": "^3.3.7", + "picocolors": "^1.0.1", + "source-map-js": "^1.2.0" + }, + "engines": { + "node": "^10 || ^12 || >=14" + } + }, + "node_modules/postcss-selector-parser": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/postcss-selector-parser/-/postcss-selector-parser-6.1.1.tgz", + "integrity": "sha512-b4dlw/9V8A71rLIDsSwVmak9z2DuBUB7CA1/wSdelNEzqsjoSPeADTWNO09lpH49Diy3/JIZ2bSPB1dI3LJCHg==", + "dev": true, + "license": "MIT", + "dependencies": { + "cssesc": "^3.0.0", + "util-deprecate": "^1.0.2" + }, + "engines": { + "node": ">=4" + } + }, + "node_modules/prelude-ls": { + "version": "1.2.1", + "resolved": "https://registry.npmjs.org/prelude-ls/-/prelude-ls-1.2.1.tgz", + "integrity": "sha512-vkcDPrRZo1QZLbn5RLGPpg/WmIQ65qoWWhcGKf/b5eplkkarX0m9z8ppCat4mlOqUsWpyNuYgO3VRyrYHSzX5g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">= 0.8.0" + } + }, + "node_modules/prettier": { + "version": "3.3.3", + "resolved": "https://registry.npmjs.org/prettier/-/prettier-3.3.3.tgz", + "integrity": "sha512-i2tDNA0O5IrMO757lfrdQZCc2jPNDVntV0m/+4whiDfWaTKfMNgR7Qz0NAeGz/nRqF4m5/6CLzbP4/liHt12Ew==", + "dev": true, + "license": "MIT", + "bin": { + "prettier": "bin/prettier.cjs" + }, + "engines": { + "node": ">=14" + }, + "funding": { + "url": "https://github.com/prettier/prettier?sponsor=1" + } + }, + "node_modules/prettier-linter-helpers": { + "version": "1.0.0", + "resolved": "https://registry.npmjs.org/prettier-linter-helpers/-/prettier-linter-helpers-1.0.0.tgz", + "integrity": "sha512-GbK2cP9nraSSUF9N2XwUwqfzlAFlMNYYl+ShE/V+H8a9uNl/oUqB1w2EL54Jh0OlyRSd8RfWYJ3coVS4TROP2w==", + "dev": true, + "license": "MIT", + "dependencies": { + "fast-diff": "^1.1.2" + }, + "engines": { + "node": ">=6.0.0" + } + }, + "node_modules/proxy-from-env": { + "version": "1.1.0", + "resolved": "https://registry.npmjs.org/proxy-from-env/-/proxy-from-env-1.1.0.tgz", + "integrity": "sha512-D+zkORCbA9f1tdWRK0RaCR3GPv50cMxcrz4X8k5LTSUD1Dkw47mKJEZQNunItRTkWwgtaUSo1RVFRIG9ZXiFYg==", + "license": "MIT" + }, + "node_modules/punycode": { + "version": "2.3.1", + "resolved": "https://registry.npmjs.org/punycode/-/punycode-2.3.1.tgz", + "integrity": "sha512-vYt7UD1U9Wg6138shLtLOvdAu+8DsC/ilFtEVHcH+wydcSpNE20AfSOduf6MkRFahL5FY7X1oU7nKVZFtfq8Fg==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=6" + } + }, + "node_modules/quasar": { + "version": "2.16.6", + "resolved": "https://registry.npmjs.org/quasar/-/quasar-2.16.6.tgz", + "integrity": "sha512-nFFS/WniZy052wUNr0UHxc46KVB0hVg7IlBpVScxQAANQxL5uJVqaiw0z2CEM8IM+MCObXANqxHX/m7woG7zIw==", + "license": "MIT", + "engines": { + "node": ">= 
10.18.1", + "npm": ">= 6.13.4", + "yarn": ">= 1.21.1" + }, + "funding": { + "type": "github", + "url": "https://donate.quasar.dev" + } + }, + "node_modules/queue-microtask": { + "version": "1.2.3", + "resolved": "https://registry.npmjs.org/queue-microtask/-/queue-microtask-1.2.3.tgz", + "integrity": "sha512-NuaNSa6flKT5JaSYQzJok04JzTL1CA6aGhv5rfLW3PgqA+M2ChpZQnAC8h8i4ZFkBS8X5RqkDBHA7r4hej3K9A==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT" + }, + "node_modules/read-package-json-fast": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/read-package-json-fast/-/read-package-json-fast-3.0.2.tgz", + "integrity": "sha512-0J+Msgym3vrLOUB3hzQCuZHII0xkNGCtz/HJH9xZshwv9DbDwkw1KaE3gx/e2J5rpEY5rtOy6cyhKOPrkP7FZw==", + "dev": true, + "license": "ISC", + "dependencies": { + "json-parse-even-better-errors": "^3.0.0", + "npm-normalize-package-bin": "^3.0.0" + }, + "engines": { + "node": "^14.17.0 || ^16.13.0 || >=18.0.0" + } + }, + "node_modules/readdirp": { + "version": "3.6.0", + "resolved": "https://registry.npmjs.org/readdirp/-/readdirp-3.6.0.tgz", + "integrity": "sha512-hOS089on8RduqdbhvQ5Z37A0ESjsqz6qnRcffsMU3495FuTdqSm+7bhJ29JvIOsBDEEnan5DPu9t3To9VRlMzA==", + "dev": true, + "license": "MIT", + "dependencies": { + "picomatch": "^2.2.1" + }, + "engines": { + "node": ">=8.10.0" + } + }, + "node_modules/resolve-from": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/resolve-from/-/resolve-from-4.0.0.tgz", + "integrity": "sha512-pb/MYmXstAkysRFx8piNI1tGFNQIFA3vkE3Gq4EuA1dF6gHp/+vgZqsCGJapvy8N3Q+4o7FwvquPJcnZ7RYy4g==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=4" + } + }, + "node_modules/reusify": { + "version": "1.0.4", + "resolved": "https://registry.npmjs.org/reusify/-/reusify-1.0.4.tgz", + "integrity": "sha512-U9nH88a3fc/ekCF1l0/UP1IosiuIjyTh7hBvXVMHYgVcfGvt897Xguj2UOLDeI5BG2m7/uwyaLVT6fbtCwTyzw==", + "dev": true, + "license": "MIT", + "engines": { + "iojs": ">=1.0.0", + "node": ">=0.10.0" + } + }, + "node_modules/rimraf": { + "version": "3.0.2", + "resolved": "https://registry.npmjs.org/rimraf/-/rimraf-3.0.2.tgz", + "integrity": "sha512-JZkJMZkAGFFPP2YqXZXPbMlMBgsxzE8ILs4lMIX/2o0L9UBw9O/Y3o6wFw/i9YLapcUJWwqbi3kdxIPdC62TIA==", + "deprecated": "Rimraf versions prior to v4 are no longer supported", + "dev": true, + "license": "ISC", + "dependencies": { + "glob": "^7.1.3" + }, + "bin": { + "rimraf": "bin.js" + }, + "funding": { + "url": "https://github.com/sponsors/isaacs" + } + }, + "node_modules/rollup": { + "version": "4.18.1", + "resolved": "https://registry.npmjs.org/rollup/-/rollup-4.18.1.tgz", + "integrity": "sha512-Elx2UT8lzxxOXMpy5HWQGZqkrQOtrVDDa/bm9l10+U4rQnVzbL/LgZ4NOM1MPIDyHk69W4InuYDF5dzRh4Kw1A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@types/estree": "1.0.5" + }, + "bin": { + "rollup": "dist/bin/rollup" + }, + "engines": { + "node": ">=18.0.0", + "npm": ">=8.0.0" + }, + "optionalDependencies": { + "@rollup/rollup-android-arm-eabi": "4.18.1", + "@rollup/rollup-android-arm64": "4.18.1", + "@rollup/rollup-darwin-arm64": "4.18.1", + "@rollup/rollup-darwin-x64": "4.18.1", + "@rollup/rollup-linux-arm-gnueabihf": "4.18.1", + "@rollup/rollup-linux-arm-musleabihf": "4.18.1", + "@rollup/rollup-linux-arm64-gnu": "4.18.1", + "@rollup/rollup-linux-arm64-musl": "4.18.1", + 
"@rollup/rollup-linux-powerpc64le-gnu": "4.18.1", + "@rollup/rollup-linux-riscv64-gnu": "4.18.1", + "@rollup/rollup-linux-s390x-gnu": "4.18.1", + "@rollup/rollup-linux-x64-gnu": "4.18.1", + "@rollup/rollup-linux-x64-musl": "4.18.1", + "@rollup/rollup-win32-arm64-msvc": "4.18.1", + "@rollup/rollup-win32-ia32-msvc": "4.18.1", + "@rollup/rollup-win32-x64-msvc": "4.18.1", + "fsevents": "~2.3.2" + } + }, + "node_modules/run-parallel": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/run-parallel/-/run-parallel-1.2.0.tgz", + "integrity": "sha512-5l4VyZR86LZ/lDxZTR6jqL8AFE2S0IFLMP26AbjsLVADxHdhB/c0GUsH+y39UfCi3dzz8OlQuPmnaJOMoDHQBA==", + "dev": true, + "funding": [ + { + "type": "github", + "url": "https://github.com/sponsors/feross" + }, + { + "type": "patreon", + "url": "https://www.patreon.com/feross" + }, + { + "type": "consulting", + "url": "https://feross.org/support" + } + ], + "license": "MIT", + "dependencies": { + "queue-microtask": "^1.2.2" + } + }, + "node_modules/sass": { + "version": "1.77.8", + "resolved": "https://registry.npmjs.org/sass/-/sass-1.77.8.tgz", + "integrity": "sha512-4UHg6prsrycW20fqLGPShtEvo/WyHRVRHwOP4DzkUrObWoWI05QBSfzU71TVB7PFaL104TwNaHpjlWXAZbQiNQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "chokidar": ">=3.0.0 <4.0.0", + "immutable": "^4.0.0", + "source-map-js": ">=0.6.2 <2.0.0" + }, + "bin": { + "sass": "sass.js" + }, + "engines": { + "node": ">=14.0.0" + } + }, + "node_modules/semver": { + "version": "7.6.3", + "resolved": "https://registry.npmjs.org/semver/-/semver-7.6.3.tgz", + "integrity": "sha512-oVekP1cKtI+CTDvHWYFUcMtsK/00wmAEfyqKfNdARm8u1wNVhSgaX7A8d4UuIlUI5e84iEwOhs7ZPYRmzU9U6A==", + "dev": true, + "license": "ISC", + "bin": { + "semver": "bin/semver.js" + }, + "engines": { + "node": ">=10" + } + }, + "node_modules/shebang-command": { + "version": "2.0.0", + "resolved": "https://registry.npmjs.org/shebang-command/-/shebang-command-2.0.0.tgz", + "integrity": "sha512-kHxr2zZpYtdmrN1qDjrrX/Z1rR1kG8Dx+gkpK1G4eXmvXswmcE1hTWBWYUzlraYw1/yZp6YuDY77YtvbN0dmDA==", + "dev": true, + "license": "MIT", + "dependencies": { + "shebang-regex": "^3.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/shebang-regex": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/shebang-regex/-/shebang-regex-3.0.0.tgz", + "integrity": "sha512-7++dFhtcx3353uBaq8DDR4NuxBetBzC7ZQOhmTQInHEd6bSrXdiEyzCvG07Z44UYdLShWUyXt5M/yhz8ekcb1A==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/shell-quote": { + "version": "1.8.1", + "resolved": "https://registry.npmjs.org/shell-quote/-/shell-quote-1.8.1.tgz", + "integrity": "sha512-6j1W9l1iAs/4xYBI1SYOVZyFcCis9b4KCLQ8fgAGG07QvzaRLVVRQvAy85yNmmZSjYjg4MWh4gNvlPujU/5LpA==", + "dev": true, + "license": "MIT", + "funding": { + "url": "https://github.com/sponsors/ljharb" + } + }, + "node_modules/slash": { + "version": "3.0.0", + "resolved": "https://registry.npmjs.org/slash/-/slash-3.0.0.tgz", + "integrity": "sha512-g9Q1haeby36OSStwb4ntCGGGaKsaVSjQ68fBxoQcutl5fS1vuY18H3wSt3jFyFtrkx+Kz0V1G85A4MyAdDMi2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + } + }, + "node_modules/source-map-js": { + "version": "1.2.0", + "resolved": "https://registry.npmjs.org/source-map-js/-/source-map-js-1.2.0.tgz", + "integrity": "sha512-itJW8lvSA0TXEphiRoawsCksnlf8SyvmFzIhltqAHluXd88pkCd+cXJVHTDwdCr0IzwptSm035IHQktUu1QUMg==", + "license": "BSD-3-Clause", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/strip-ansi": { + "version": 
"6.0.1", + "resolved": "https://registry.npmjs.org/strip-ansi/-/strip-ansi-6.0.1.tgz", + "integrity": "sha512-Y38VPSHcqkFrCpFnQ9vuSXmquuv5oXOKpGeT6aGrr3o3Gc9AlVa6JBfUSOCnbxGGZF+/0ooI7KrPuUSztUdU5A==", + "dev": true, + "license": "MIT", + "dependencies": { + "ansi-regex": "^5.0.1" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/strip-json-comments": { + "version": "3.1.1", + "resolved": "https://registry.npmjs.org/strip-json-comments/-/strip-json-comments-3.1.1.tgz", + "integrity": "sha512-6fPc+R4ihwqP6N/aIv2f1gMH8lOVtWQHoqC4yK6oSDVVocumAsfCqjkXnqiYMhmMwS/mEHLp7Vehlt3ql6lEig==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=8" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/style-mod": { + "version": "4.1.2", + "resolved": "https://registry.npmjs.org/style-mod/-/style-mod-4.1.2.tgz", + "integrity": "sha512-wnD1HyVqpJUI2+eKZ+eo1UwghftP6yuFheBqqe+bWCotBjC2K1YnteJILRMs3SM4V/0dLEW1SC27MWP5y+mwmw==", + "license": "MIT" + }, + "node_modules/supports-color": { + "version": "7.2.0", + "resolved": "https://registry.npmjs.org/supports-color/-/supports-color-7.2.0.tgz", + "integrity": "sha512-qpCAvRl9stuOHveKsn7HncJRvv501qIacKzQlO/+Lwxc9+0q2wLyv4Dfvt80/DPn2pqOBsJdDiogXGR9+OvwRw==", + "dev": true, + "license": "MIT", + "dependencies": { + "has-flag": "^4.0.0" + }, + "engines": { + "node": ">=8" + } + }, + "node_modules/synckit": { + "version": "0.9.1", + "resolved": "https://registry.npmjs.org/synckit/-/synckit-0.9.1.tgz", + "integrity": "sha512-7gr8p9TQP6RAHusBOSLs46F4564ZrjV8xFmw5zCmgmhGUcw2hxsShhJ6CEiHQMgPDwAQ1fWHPM0ypc4RMAig4A==", + "dev": true, + "license": "MIT", + "dependencies": { + "@pkgr/core": "^0.1.0", + "tslib": "^2.6.2" + }, + "engines": { + "node": "^14.18.0 || >=16.0.0" + }, + "funding": { + "url": "https://opencollective.com/unts" + } + }, + "node_modules/text-table": { + "version": "0.2.0", + "resolved": "https://registry.npmjs.org/text-table/-/text-table-0.2.0.tgz", + "integrity": "sha512-N+8UisAXDGk8PFXP4HAzVR9nbfmVJ3zYLAWiTIoqC5v5isinhr+r5uaO8+7r3BMfuNIufIsA7RdpVgacC2cSpw==", + "dev": true, + "license": "MIT" + }, + "node_modules/to-regex-range": { + "version": "5.0.1", + "resolved": "https://registry.npmjs.org/to-regex-range/-/to-regex-range-5.0.1.tgz", + "integrity": "sha512-65P7iz6X5yEr1cwcgvQxbbIw7Uk3gOy5dIdtZ4rDveLqhrdJP+Li/Hx6tyK0NEb+2GCyneCMJiGqrADCSNk8sQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "is-number": "^7.0.0" + }, + "engines": { + "node": ">=8.0" + } + }, + "node_modules/ts-api-utils": { + "version": "1.3.0", + "resolved": "https://registry.npmjs.org/ts-api-utils/-/ts-api-utils-1.3.0.tgz", + "integrity": "sha512-UQMIo7pb8WRomKR1/+MFVLTroIvDVtMX3K6OUir8ynLyzB8Jeriont2bTAtmNPa1ekAgN7YPDyf6V+ygrdU+eQ==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=16" + }, + "peerDependencies": { + "typescript": ">=4.2.0" + } + }, + "node_modules/tslib": { + "version": "2.6.3", + "resolved": "https://registry.npmjs.org/tslib/-/tslib-2.6.3.tgz", + "integrity": "sha512-xNvxJEOUiWPGhUuUdQgAJPKOOJfGnIyKySOc09XkKsgdUV/3E2zvwZYdejjmRgPCgcym1juLH3226yA7sEFJKQ==", + "dev": true, + "license": "0BSD" + }, + "node_modules/type-check": { + "version": "0.4.0", + "resolved": "https://registry.npmjs.org/type-check/-/type-check-0.4.0.tgz", + "integrity": "sha512-XleUoc9uwGXqjWwXaUTZAmzMcFZ5858QA2vvx1Ur5xIcixXIP+8LnFDgRplU30us6teqdlskFfu+ae4K79Ooew==", + "dev": true, + "license": "MIT", + "dependencies": { + "prelude-ls": "^1.2.1" + }, + "engines": { + "node": ">= 0.8.0" + } + }, + 
"node_modules/type-fest": { + "version": "0.20.2", + "resolved": "https://registry.npmjs.org/type-fest/-/type-fest-0.20.2.tgz", + "integrity": "sha512-Ne+eE4r0/iWnpAxD852z3A+N0Bt5RN//NjJwRd2VFHEmrywxf5vsZlh4R6lixl6B+wz/8d+maTSAkN1FIkI3LQ==", + "dev": true, + "license": "(MIT OR CC0-1.0)", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + }, + "node_modules/typescript": { + "version": "5.3.3", + "resolved": "https://registry.npmjs.org/typescript/-/typescript-5.3.3.tgz", + "integrity": "sha512-pXWcraxM0uxAS+tN0AG/BF2TyqmHO014Z070UsJ+pFvYuRSq8KH8DmWpnbXe0pEPDHXZV3FcAbJkijJ5oNEnWw==", + "devOptional": true, + "license": "Apache-2.0", + "bin": { + "tsc": "bin/tsc", + "tsserver": "bin/tsserver" + }, + "engines": { + "node": ">=14.17" + } + }, + "node_modules/undici-types": { + "version": "5.26.5", + "resolved": "https://registry.npmjs.org/undici-types/-/undici-types-5.26.5.tgz", + "integrity": "sha512-JlCMO+ehdEIKqlFxk6IfVoAUVmgz7cU7zD/h9XZ0qzeosSHmUJVOzSQvvYSYWXkFXC+IfLKSIffhv0sVZup6pA==", + "dev": true, + "license": "MIT" + }, + "node_modules/uri-js": { + "version": "4.4.1", + "resolved": "https://registry.npmjs.org/uri-js/-/uri-js-4.4.1.tgz", + "integrity": "sha512-7rKUyy33Q1yc98pQ1DAmLtwX109F7TIfWlW1Ydo8Wl1ii1SeHieeh0HHfPeL2fMXK6z0s8ecKs9frCuLJvndBg==", + "dev": true, + "license": "BSD-2-Clause", + "dependencies": { + "punycode": "^2.1.0" + } + }, + "node_modules/util-deprecate": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/util-deprecate/-/util-deprecate-1.0.2.tgz", + "integrity": "sha512-EPD5q1uXyFxJpCrLnCc1nHnq3gOa6DZBocAIiI2TaSCA7VCJ1UJDMagCzIkXNsUYfD1daK//LTEQ8xiIbrHtcw==", + "dev": true, + "license": "MIT" + }, + "node_modules/vite": { + "version": "5.3.4", + "resolved": "https://registry.npmjs.org/vite/-/vite-5.3.4.tgz", + "integrity": "sha512-Cw+7zL3ZG9/NZBB8C+8QbQZmR54GwqIz+WMI4b3JgdYJvX+ny9AjJXqkGQlDXSXRP9rP0B4tbciRMOVEKulVOA==", + "dev": true, + "license": "MIT", + "dependencies": { + "esbuild": "^0.21.3", + "postcss": "^8.4.39", + "rollup": "^4.13.0" + }, + "bin": { + "vite": "bin/vite.js" + }, + "engines": { + "node": "^18.0.0 || >=20.0.0" + }, + "funding": { + "url": "https://github.com/vitejs/vite?sponsor=1" + }, + "optionalDependencies": { + "fsevents": "~2.3.3" + }, + "peerDependencies": { + "@types/node": "^18.0.0 || >=20.0.0", + "less": "*", + "lightningcss": "^1.21.0", + "sass": "*", + "stylus": "*", + "sugarss": "*", + "terser": "^5.4.0" + }, + "peerDependenciesMeta": { + "@types/node": { + "optional": true + }, + "less": { + "optional": true + }, + "lightningcss": { + "optional": true + }, + "sass": { + "optional": true + }, + "stylus": { + "optional": true + }, + "sugarss": { + "optional": true + }, + "terser": { + "optional": true + } + } + }, + "node_modules/vue": { + "version": "3.4.32", + "resolved": "https://registry.npmjs.org/vue/-/vue-3.4.32.tgz", + "integrity": "sha512-9mCGIAi/CAq7GtaLLLp2J92pEic+HArstG+pq6F+H7+/jB9a0Z7576n4Bh4k79/50L1cKMIhZC3MC0iGpl+1IA==", + "license": "MIT", + "dependencies": { + "@vue/compiler-dom": "3.4.32", + "@vue/compiler-sfc": "3.4.32", + "@vue/runtime-dom": "3.4.32", + "@vue/server-renderer": "3.4.32", + "@vue/shared": "3.4.32" + }, + "peerDependencies": { + "typescript": "*" + }, + "peerDependenciesMeta": { + "typescript": { + "optional": true + } + } + }, + "node_modules/vue-codemirror": { + "version": "6.1.1", + "resolved": "https://registry.npmjs.org/vue-codemirror/-/vue-codemirror-6.1.1.tgz", + "integrity": 
"sha512-rTAYo44owd282yVxKtJtnOi7ERAcXTeviwoPXjIc6K/IQYUsoDkzPvw/JDFtSP6T7Cz/2g3EHaEyeyaQCKoDMg==", + "license": "MIT", + "dependencies": { + "@codemirror/commands": "6.x", + "@codemirror/language": "6.x", + "@codemirror/state": "6.x", + "@codemirror/view": "6.x" + }, + "peerDependencies": { + "codemirror": "6.x", + "vue": "3.x" + } + }, + "node_modules/vue-eslint-parser": { + "version": "9.4.3", + "resolved": "https://registry.npmjs.org/vue-eslint-parser/-/vue-eslint-parser-9.4.3.tgz", + "integrity": "sha512-2rYRLWlIpaiN8xbPiDyXZXRgLGOtWxERV7ND5fFAv5qo1D2N9Fu9MNajBNc6o13lZ+24DAWCkQCvj4klgmcITg==", + "dev": true, + "license": "MIT", + "dependencies": { + "debug": "^4.3.4", + "eslint-scope": "^7.1.1", + "eslint-visitor-keys": "^3.3.0", + "espree": "^9.3.1", + "esquery": "^1.4.0", + "lodash": "^4.17.21", + "semver": "^7.3.6" + }, + "engines": { + "node": "^14.17.0 || >=16.0.0" + }, + "funding": { + "url": "https://github.com/sponsors/mysticatea" + }, + "peerDependencies": { + "eslint": ">=6.0.0" + } + }, + "node_modules/vue-router": { + "version": "4.4.0", + "resolved": "https://registry.npmjs.org/vue-router/-/vue-router-4.4.0.tgz", + "integrity": "sha512-HB+t2p611aIZraV2aPSRNXf0Z/oLZFrlygJm+sZbdJaW6lcFqEDQwnzUBXn+DApw+/QzDU/I9TeWx9izEjTmsA==", + "license": "MIT", + "dependencies": { + "@vue/devtools-api": "^6.5.1" + }, + "funding": { + "url": "https://github.com/sponsors/posva" + }, + "peerDependencies": { + "vue": "^3.2.0" + } + }, + "node_modules/vue-template-compiler": { + "version": "2.7.16", + "resolved": "https://registry.npmjs.org/vue-template-compiler/-/vue-template-compiler-2.7.16.tgz", + "integrity": "sha512-AYbUWAJHLGGQM7+cNTELw+KsOG9nl2CnSv467WobS5Cv9uk3wFcnr1Etsz2sEIHEZvw1U+o9mRlEO6QbZvUPGQ==", + "dev": true, + "license": "MIT", + "dependencies": { + "de-indent": "^1.0.2", + "he": "^1.2.0" + } + }, + "node_modules/vue-tsc": { + "version": "1.8.27", + "resolved": "https://registry.npmjs.org/vue-tsc/-/vue-tsc-1.8.27.tgz", + "integrity": "sha512-WesKCAZCRAbmmhuGl3+VrdWItEvfoFIPXOvUJkjULi+x+6G/Dy69yO3TBRJDr9eUlmsNAwVmxsNZxvHKzbkKdg==", + "dev": true, + "license": "MIT", + "dependencies": { + "@volar/typescript": "~1.11.1", + "@vue/language-core": "1.8.27", + "semver": "^7.5.4" + }, + "bin": { + "vue-tsc": "bin/vue-tsc.js" + }, + "peerDependencies": { + "typescript": "*" + } + }, + "node_modules/w3c-keyname": { + "version": "2.2.8", + "resolved": "https://registry.npmjs.org/w3c-keyname/-/w3c-keyname-2.2.8.tgz", + "integrity": "sha512-dpojBhNsCNN7T82Tm7k26A6G9ML3NkhDsnw9n/eoxSRlVBB4CEtIQ/KTCLI2Fwf3ataSXRhYFkQi3SlnFwPvPQ==", + "license": "MIT" + }, + "node_modules/which": { + "version": "2.0.2", + "resolved": "https://registry.npmjs.org/which/-/which-2.0.2.tgz", + "integrity": "sha512-BLI3Tl1TW3Pvl70l3yq3Y64i+awpwXqsGBYWkkqMtnbXgrMD+yj7rhW0kuEDxzJaYXGjEW5ogapKNMEKNMjibA==", + "dev": true, + "license": "ISC", + "dependencies": { + "isexe": "^2.0.0" + }, + "bin": { + "node-which": "bin/node-which" + }, + "engines": { + "node": ">= 8" + } + }, + "node_modules/word-wrap": { + "version": "1.2.5", + "resolved": "https://registry.npmjs.org/word-wrap/-/word-wrap-1.2.5.tgz", + "integrity": "sha512-BN22B5eaMMI9UMtjrGd5g5eCYPpCPDUy0FJXbYsaT5zYxjFOckS53SQDE3pWkVoWpHXVb3BrYcEN4Twa55B5cA==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=0.10.0" + } + }, + "node_modules/wrappy": { + "version": "1.0.2", + "resolved": "https://registry.npmjs.org/wrappy/-/wrappy-1.0.2.tgz", + "integrity": "sha512-l4Sp/DRseor9wL6EvV2+TuQn63dMkPjZ/sp9XkghTEbV9KlPS1xUsZ3u7/IQO4wxtcFB4bgpQPRcR3QCvezPcQ==", 
+ "dev": true, + "license": "ISC" + }, + "node_modules/xml-name-validator": { + "version": "4.0.0", + "resolved": "https://registry.npmjs.org/xml-name-validator/-/xml-name-validator-4.0.0.tgz", + "integrity": "sha512-ICP2e+jsHvAj2E2lIHxa5tjXRlKDJo4IdvPvCXbXQGdzSfmSpNVyIKMvoZHjDY9DP0zV17iI85o90vRFXNccRw==", + "dev": true, + "license": "Apache-2.0", + "engines": { + "node": ">=12" + } + }, + "node_modules/yaml": { + "version": "2.5.0", + "resolved": "https://registry.npmjs.org/yaml/-/yaml-2.5.0.tgz", + "integrity": "sha512-2wWLbGbYDiSqqIKoPjar3MPgB94ErzCtrNE1FdqGuaO0pi2JGjmE8aW8TDZwzU7vuxcGRdL/4gPQwQ7hD5AMSw==", + "license": "ISC", + "bin": { + "yaml": "bin.mjs" + }, + "engines": { + "node": ">= 14" + } + }, + "node_modules/yocto-queue": { + "version": "0.1.0", + "resolved": "https://registry.npmjs.org/yocto-queue/-/yocto-queue-0.1.0.tgz", + "integrity": "sha512-rVksvsnNCdJ/ohGc6xgPwyN8eheCxsiLM8mxuE/t/mOVqJewPuO1miLpTHQiRgTKCLexL4MeAFVagts7HmNZ2Q==", + "dev": true, + "license": "MIT", + "engines": { + "node": ">=10" + }, + "funding": { + "url": "https://github.com/sponsors/sindresorhus" + } + } + } +} diff --git a/src/frontend/package.json b/src/frontend/package.json index c878beee3..cf859fc84 100644 --- a/src/frontend/package.json +++ b/src/frontend/package.json @@ -25,7 +25,8 @@ "quasar": "^2.14.2", "vue": "^3.3.11", "vue-codemirror": "^6.1.1", - "vue-router": "^4.2.5" + "vue-router": "^4.2.5", + "yaml": "^2.5.0" }, "devDependencies": { "@quasar/vite-plugin": "^1.6.0", diff --git a/src/frontend/src/components/CodeEditor.vue b/src/frontend/src/components/CodeEditor.vue index f74ff458f..a8c2a0f93 100644 --- a/src/frontend/src/components/CodeEditor.vue +++ b/src/frontend/src/components/CodeEditor.vue @@ -7,18 +7,16 @@ :indent-with-tab="true" :tab-size="2" :extensions="extensions" + :disabled="readOnly" @ready="handleReady" - @change="console.log('change', $event)" - @focus="console.log('focus', $event)" - @blur="console.log('blur', $event)" + @update="highlightPlaceholder" :style="{ 'min-height': '250px', 'max-height': '70vh', 'border': `${showError ? '2px solid red' : '2px solid black'}` }" /> {{ showError || '...' 
}} @@ -33,9 +31,58 @@ import { linter, lintGutter } from "@codemirror/lint" import parser from "js-yaml" import { python } from '@codemirror/lang-python' - import { EditorState } from '@codemirror/state' + import { CompletionContext, autocompletion, startCompletion } from '@codemirror/autocomplete' + import YAML from 'yaml' - const props = defineProps(['placeholder', 'language', 'readOnly', 'showError']) + function myCompletions(context) { + let word = context.matchBefore(/\w*/) + if (word.from == word.to && !context.explicit) { + return null + } + + // Get the current line from the document + const line = context.state.doc.lineAt(context.pos) + const lineText = line.text + + // Get the text before the cursor + const textBeforeCursor = lineText.slice(0, context.pos - line.from) + + // Check if the cursor is typing a new key before the colon + const isTypingBeforeColon = /^[^\s:]+$/.test(textBeforeCursor.trim()) + + // If the user is typing a new key before the colon, suppress the autocompletion + if (isTypingBeforeColon) return null + + // Determine the current top-level key + let topLevelKeys = [] + try { + topLevelKeys = Object.keys(YAML.parse(context.state.doc.toString())) + } catch (error) { + console.error("Failed to parse YAML:", error) + } + let currentTopLevelKey = null + + for (const key of topLevelKeys) { + if (context.pos >= context.state.doc.toString().indexOf(key)) { + currentTopLevelKey = key + } + } + + // Filter out the current top-level key from the autocompletions + const filteredTopLevelKeys = getTopLevelKeys().filter(option => option.label !== `$${currentTopLevelKey}`) + + return { + from: word.from, + options: [...props.autocompletions, ...filteredTopLevelKeys], + // options: [ + // {label: "match", type: "keyword"}, + // {label: "hello", type: "variable", info: "(World)"}, + // {label: "magic", type: "text", apply: "⠁⭒*.✩.*⭒⠁", detail: "macro"} + // ] + } + } + + const props = defineProps(['placeholder', 'language', 'readOnly', 'showError', 'autocompletions']) const code = defineModel() @@ -45,27 +92,77 @@ view.value = payload.view } + function highlightPlaceholder(update) { + if(!view.value || update.docChanged || props.language === 'python') return + const from = view.value.state.selection.ranges[0].from + const to = view.value.state.selection.ranges[0].to + if(from !== to) return // short circuit if user is dragging cursor + + const placeholders = ['', ''] + placeholders.forEach((placeholder) => { + let startIndex = code.value.indexOf(placeholder) + while(startIndex !== -1) { + const endIndex = startIndex + placeholder.length + + // Check if the cursor position is within the bounds of the current placeholder instance + if(from >= startIndex && from <= endIndex && from !== startIndex && from !== endIndex) { + console.log(`Cursor is at position ${from}, within the substring '${placeholder}' from index ${startIndex} to ${endIndex}`) + view.value.dispatch({ + selection: { anchor: startIndex, head: endIndex } + }) + if(placeholder === '') { + startCompletion(view.value) + } + return // Break after the first match to avoid overlapping selection conflicts + } + + // Move to the next possible start index to continue searching + startIndex = code.value.indexOf(placeholder, startIndex + placeholder.length) + } + }) + } + + const yamlLinter = linter((view) => { - const diagnostics = []; + const diagnostics = [] try { - parser.load(view.state.doc); + parser.load(view.state.doc) } catch (e) { - const loc = e.mark; - const from = 
loc.position : 0; - const to = from; - const severity = "error"; + const loc = e.mark + const from = loc ? loc.position : 0 + const to = from + const severity = "error" diagnostics.push({ from, to, message: e.message, severity - }); + }) } - return diagnostics; + return diagnostics }) + function getTopLevelKeys() { + try { + if(code.value) { + let output = [] + const keys = Object.keys(YAML.parse(code.value)).filter((key) => key !== '') + keys.forEach((key) => { + output.push({ + label: `$${key}`, + type: 'keyword' + }) + }) + return output + } + return [] + } catch (error) { + console.error('YAML Parsing Error:', error) + return [] + } + } const extensions = computed(() => { if(props.language === 'python') { @@ -76,7 +173,7 @@ oneDark, yamlLinter, lintGutter(), - EditorState.readOnly.of(props.readOnly) + autocompletion({ override: [myCompletions] }), ] }) \ No newline at end of file diff --git a/src/frontend/src/components/TableComponent.vue b/src/frontend/src/components/TableComponent.vue index 866f4fb43..737b89cfa 100644 --- a/src/frontend/src/components/TableComponent.vue +++ b/src/frontend/src/components/TableComponent.vue @@ -14,7 +14,7 @@ dense v-model:pagination="pagination" @request="onRequest" - :rows-per-page-options="[5,10,15,20,25,50]" + :rows-per-page-options="[5,10,15,20,25,50,0]" > diff --git a/src/frontend/src/views/CreatePluginFile.vue b/src/frontend/src/views/CreatePluginFile.vue index e93cae5aa..2f1d6fc23 100644 --- a/src/frontend/src/views/CreatePluginFile.vue +++ b/src/frontend/src/views/CreatePluginFile.vue @@ -62,11 +62,12 @@
Plugin Tasks {{ `${param.name}` }} @@ -117,7 +118,7 @@ clickable removable @click="handleSelectedParam('edit', props, i, 'outputParams'); showEditParamDialog = true" - @remove="tasks[props.rowIndex].outputParams.splice(i, 1)" + @remove="pluginFile.tasks[props.rowIndex].outputParams.splice(i, 1)" :label="`${param.name}: ${pluginParameterTypes.filter((type) => type.id === param.parameterType)[0]?.name}`" /> + > + Add Input Param + + Add Input Param + + @@ -245,24 +251,22 @@ style="height: 10px" class="q-mr-sm" @click="addOutputParam()" - /> + > + Add Output Param + + Add Output Param + + - - - Add Task - - Add Task - - + /> @@ -281,6 +285,7 @@ title="Plugin Param Types" @request="getPluginParameterTypes" :hideToggleDraft="true" + :hideCreateBtn="true" :hideEditBtn="true" :hideDeleteBtn="true" :disableSelect="true" @@ -304,6 +309,7 @@ color="negative" label="Cancel" class="q-mr-lg" + @click="confirmLeave = true" /> @@ -341,11 +347,20 @@ @updateParam="updateParam" @addParam="addParam" /> + + \ No newline at end of file diff --git a/src/frontend/src/views/EntryPointsView.vue b/src/frontend/src/views/EntryPointsView.vue index dd7480c0d..9d3d61d1c 100644 --- a/src/frontend/src/views/EntryPointsView.vue +++ b/src/frontend/src/views/EntryPointsView.vue @@ -13,6 +13,7 @@ @request="getEntrypoints" ref="tableRef" @editTags="(row) => { editObjTags = row; showTagsDialog = true }" + @create="router.push('/entrypoints/new')" >
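
For reference, a minimal usage sketch of the CodeEditor component changed above (not part of the patch itself): the new `autocompletions` prop supplies extra completion options that `myCompletions` merges with the `$key` options derived from the document's top-level YAML keys, and `readOnly` now disables the editor via the `:disabled` binding. The parent view, the import path, and the ref/option names below are illustrative assumptions; only the prop names, the `v-model` binding, and the `{label, type}` option shape come from the component and from @codemirror/autocomplete.

    <script setup>
    // Hypothetical parent view; the '@/components/CodeEditor.vue' path is assumed.
    import { ref } from 'vue'
    import CodeEditor from '@/components/CodeEditor.vue'

    // Editable YAML document bound through the component's defineModel().
    const yamlSource = ref('parameters:\n  epochs: 30\n')

    // Extra options passed through to the CodeMirror autocompletion override;
    // {label, type} matches the @codemirror/autocomplete Completion shape.
    const extraCompletions = [
      { label: '$parameters', type: 'keyword' },
      { label: '$tasks', type: 'keyword' },
    ]
    </script>

    <template>
      <CodeEditor
        v-model="yamlSource"
        language="yaml"
        :read-only="false"
        :autocompletions="extraCompletions"
      />
    </template>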