diff --git a/notebooks/downstream_adaptation.ipynb b/notebooks/downstream_adaptation.ipynb
index 67acc7c..4a5126b 100644
--- a/notebooks/downstream_adaptation.ipynb
+++ b/notebooks/downstream_adaptation.ipynb
@@ -1,254 +1,59 @@
 {
 "cells": [
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "# Downstream adaptation with MiniMol\n",
- "\n",
- "This example shows how MiniMol can featurise small molecules that then serve as input to another model trained on a small downstream dataset from TDC ADMET. This allows the knowledge in the pre-trained MiniMol to be transferred to another task.\n",
- "\n",
- "Before we start, let's make sure that the TDC package is installed in the environment. The package is quite large, and we assume that a user wouldn't necessarily need it in their own work, which is why we don't include it in the dependencies."
- ]
- },
 {
 "cell_type": "code",
 "execution_count": 1,
 "metadata": {},
- "outputs": [],
- "source": [
- "# change cuXXX to the CUDA version installed on your machine\n",
- "%pip install torch-sparse torch-cluster torch-scatter -f https://pytorch-geometric.com/whl/torch-2.3.0+cu121.html\n",
- "%pip install hydra-core\n",
- "%pip install graphium==2.4.7\n",
- "%pip install minimol\n",
- "%pip install pytdc"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Step 1: Getting the data\n",
- "Next, we will build a predictor for the `HIA_Hou` dataset, one of the binary classification benchmarks from the TDC ADMET group. HIA stands for human intestinal absorption: the ability of a substance to be absorbed through the gastrointestinal system into the bloodstream.\n",
- "\n",
- "We then split the data based on molecular scaffolds into training, validation and test sets."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 2,
- "metadata": {},
 "outputs": [
 {
 "name": "stderr",
 "output_type": "stream",
 "text": [
- "Found local copy...\n",
- "generating training, validation splits...\n",
- "generating training, validation splits...\n",
- "100%|██████████| 461/461 [00:00<00:00, 3648.38it/s]\n"
- ]
- }
- ],
- "source": [
- "from tdc.benchmark_group import admet_group\n",
- "\n",
- "DATASET_NAME = 'HIA_Hou'\n",
- "\n",
- "admet = admet_group(path=\"admet-data/\")\n",
- "\n",
- "mols_test = admet.get(DATASET_NAME)['test']\n",
- "mols_train, mols_val = admet.get_train_valid_split(benchmark=DATASET_NAME, split_type='scaffold', seed=42)"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 3,
- "metadata": {},
- "outputs": [
- {
- "name": "stdout",
- "output_type": "stream",
- "text": [
- "Dataset - HIA_Hou\n",
- "\n",
- "Val split (58 mols): \n",
- " Drug_ID Drug Y\n",
- "0 Atracurium.mol COc1ccc(C[C@H]2c3cc(OC)c(OC)cc3CC[N@@+]2(C)CCC... 0\n",
- "1 Succinylsulfathiazole O=C(O)CCC(=O)Nc1ccc(S(=O)(=O)Nc2nccs2)cc1 0\n",
- "2 Ticarcillin CC1(C)S[C@H]2[C@@H](NC(=O)[C@@H](C(=O)O)c3ccsc... 0\n",
- "3 Raffinose.mol OC[C@@H]1O[C@@H](OC[C@@H]2O[C@@H](O[C@]3(CO)O[... 0\n",
- "4 Triamcinolone C[C@@]12C=CC(=O)C=C1CC[C@@H]1[C@H]3C[C@@H](O)[... 1\n",
- "\n",
- "Test split (117 mols): \n",
- " Drug_ID Drug Y\n",
- "0 Trazodone.mol O=c1n(CCCN2CCN(c3cccc(Cl)c3)CC2)nc2ccccn12 1\n",
- "1 Lisuride.mol CCN(CC)C(=O)N[C@H]1C=C2c3cccc4[nH]cc(c34)C[C@@... 1\n",
- "2 Methylergonovine.mol CC[C@H](CO)NC(=O)[C@H]1C=C2c3cccc4[nH]cc(c34)C... 1\n",
- "3 Methysergide.mol CC[C@H](CO)NC(=O)[C@H]1C=C2c3cccc4c3c(cn4C)C[C...
1\n", - "4 Moclobemide.mol O=C(NCCN1CCOCC1)c1ccc(Cl)cc1 1\n", - "\n", - "Train split (403 mols): \n", - " Drug_ID Drug Y\n", - "0 Guanadrel N=C(N)NC[C@@H]1COC2(CCCCC2)O1 1\n", - "1 Cefmetazole CO[C@@]1(NC(=O)CSCC#N)C(=O)N2C(C(=O)O)=C(CSc3n... 0\n", - "2 Zonisamide.mol NS(=O)(=O)Cc1noc2ccccc12 1\n", - "3 Furosemide.mol NS(=O)(=O)c1cc(Cl)cc(NCc2ccco2)c1C(=O)O 1\n", - "4 Telmisartan.mol CCCc1nc2c(n1Cc1ccc(-c3ccccc3C(=O)O)cc1)=C[C@H]... 1\n", - "\n" + "/home/blazejb/minimol/.minimol/lib/python3.12/site-packages/tqdm/auto.py:21: TqdmWarning: IProgress not found. Please update jupyter and ipywidgets. See https://ipywidgets.readthedocs.io/en/stable/user_install.html\n", + " from .autonotebook import tqdm as notebook_tqdm\n" ] } ], - "source": [ - "print(f\"Dataset - {DATASET_NAME}\\n\")\n", - "print(f\"Val split ({len(mols_val)} mols): \\n{mols_val.head()}\\n\")\n", - "print(f\"Test split ({len(mols_test)} mols): \\n{mols_test.head()}\\n\")\n", - "print(f\"Train split ({len(mols_train)} mols): \\n{mols_train.head()}\\n\")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step 2: Generating molecular fingerprints\n", - "Now that we have the splits, we will use MiniMol to embed all molecules. The embedding will be added as an extra column in the dataframe returned by TDC." - ] - }, - { - "cell_type": "code", - "execution_count": null, - "metadata": {}, - "outputs": [], "source": [ "from minimol import Minimol\n", "\n", - "featuriser = Minimol()" - ] - }, - { - "cell_type": "code", - "execution_count": 3, - "metadata": {}, - "outputs": [ - { - "name": "stderr", - "output_type": "stream", - "text": [ - "100%|██████████| 12/12 [00:25<00:00, 2.14s/it]\n", - "100%|██████████| 24/24 [00:01<00:00, 14.06it/s]\n", - "100%|██████████| 81/81 [00:05<00:00, 13.51it/s]\n" - ] - } - ], - "source": [ - "mols_val['Embedding'] = featuriser(list(mols_val['Drug']))\n", - "mols_test['Embedding'] = featuriser(list(mols_test['Drug']))\n", - "mols_train['Embedding'] = featuriser(list(mols_train['Drug']))" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "The model is small, so it took us 6.6 seconds to generate the embeddings for almost 600 molecules. Here is a preview after the new column has been added:" - ] - }, - { - "cell_type": "code", - "execution_count": 6, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - " Drug_ID Drug Y \\\n", - "0 Guanadrel N=C(N)NC[C@@H]1COC2(CCCCC2)O1 1 \n", - "1 Cefmetazole CO[C@@]1(NC(=O)CSCC#N)C(=O)N2C(C(=O)O)=C(CSc3n... 0 \n", - "2 Zonisamide.mol NS(=O)(=O)Cc1noc2ccccc12 1 \n", - "3 Furosemide.mol NS(=O)(=O)c1cc(Cl)cc(NCc2ccco2)c1C(=O)O 1 \n", - "4 Telmisartan.mol CCCc1nc2c(n1Cc1ccc(-c3ccccc3C(=O)O)cc1)=C[C@H]... 1 \n", - "\n", - " Embedding \n", - "0 [tensor(0.2477), tensor(0.1814), tensor(0.4020... \n", - "1 [tensor(0.7070), tensor(0.4123), tensor(1.0127... \n", - "2 [tensor(0.1878), tensor(-0.1408), tensor(0.891... \n", - "3 [tensor(0.1206), tensor(0.3858), tensor(1.5851... \n", - "4 [tensor(1.0168), tensor(1.1367), tensor(2.2483... 
\n" - ] - } - ], - "source": [ - "print(mols_train.head())" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "## Step 3: Training a model\n", - "Now that the molecules are featurised leverging the representation MiniMol learned during its pre-training, we will set up a training and evaluation loop of a simple Multi-Layer Perceptron model using PyTorch.\n", - "\n", - "Let's start by defining a new class for the dataset and then creating a separate dataloader for each split." - ] - }, - { - "cell_type": "code", - "execution_count": 7, - "metadata": {}, - "outputs": [], - "source": [ - "from torch.utils.data import DataLoader, Dataset\n", - " \n", - "class AdmetDataset(Dataset):\n", - " def __init__(self, samples):\n", - " self.samples = samples['Embedding'].tolist()\n", - " self.targets = [float(target) for target in samples['Y'].tolist()]\n", - "\n", - " def __len__(self):\n", - " return len(self.samples)\n", - "\n", - " def __getitem__(self, idx):\n", - " sample = torch.tensor(self.samples[idx])\n", - " target = torch.tensor(self.targets[idx])\n", - " return sample, target\n", + "import os\n", + "import math\n", "\n", - "val_loader = DataLoader(AdmetDataset(mols_val), batch_size=128, shuffle=False)\n", - "test_loader = DataLoader(AdmetDataset(mols_test), batch_size=128, shuffle=False)\n", - "train_loader = DataLoader(AdmetDataset(mols_train), batch_size=32, shuffle=True)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Our model is a simple 3-layer perceptron with batch normalisation and dropout. We also add a residual connection that before the last layer concatates the the input features with the output from the second to last layer." - ] - }, - { - "cell_type": "code", - "execution_count": 24, - "metadata": {}, - "outputs": [], - "source": [ + "import torch\n", "import torch.nn as nn\n", + "import torch.optim as optim\n", "import torch.nn.functional as F\n", + "from torch.optim.lr_scheduler import LambdaLR\n", + "from torch.utils.data import DataLoader, Dataset\n", "\n", + "from tdc.benchmark_group import admet_group\n", "\n", - "class TaskHead(nn.Module):\n", - " def __init__(self):\n", - " super(TaskHead, self).__init__()\n", - " self.dense1 = nn.Linear(512, 512)\n", - " self.dense2 = nn.Linear(512, 512)\n", - " self.final_dense = nn.Linear(1024, 1)\n", - " self.bn1 = nn.BatchNorm1d(512)\n", - " self.bn2 = nn.BatchNorm1d(512)\n", - " self.dropout = nn.Dropout(0.10)\n", + "from contextlib import redirect_stdout, redirect_stderr\n", "\n", - " def forward(self, x):\n", - " original_x = x\n", "\n", + "class MultiTaskModel(nn.Module):\n", + " def __init__(self, hidden_dim=512, input_dim=512, head_hidden_dim=256, dropout=0.1, task_names=None):\n", + " super(MultiTaskModel, self).__init__()\n", + " \n", + " self.dense1 = nn.Linear(input_dim, hidden_dim)\n", + " self.dense2 = nn.Linear(hidden_dim, hidden_dim)\n", + " self.bn1 = nn.BatchNorm1d(hidden_dim)\n", + " self.bn2 = nn.BatchNorm1d(hidden_dim)\n", + " self.dropout = nn.Dropout(dropout)\n", + "\n", + " self.heads = nn.ModuleDict({\n", + " task_name: nn.Sequential(\n", + " nn.Linear(hidden_dim, head_hidden_dim),\n", + " nn.ReLU(),\n", + " nn.Dropout(dropout),\n", + " nn.Linear(head_hidden_dim, 1)\n", + " ) for task_name in task_names\n", + " })\n", + "\n", + " self.trunk_frozen = False\n", + "\n", + " def forward(self, x, task_name):\n", " x = self.dense1(x)\n", " x = self.bn1(x)\n", " x = F.relu(x)\n", @@ -259,513 +64,183 @@ " x = F.relu(x)\n", " x = self.dropout(x)\n", "\n", - " x = 
- "        x = torch.cat((x, original_x), dim=1)\n",
- "        x = self.final_dense(x)\n",
- "        \n",
- "        return x"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Below we declare the basic hyperparameters, optimiser, loss function and learning rate scheduler. We build a model factory that allows us to instantiate a fresh copy of everything, which will become useful later."
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 22,
- "metadata": {},
- "outputs": [],
- "source": [
- "import math\n",
- "import torch.nn as nn\n",
- "import torch.optim as optim\n",
- "from torch.optim.lr_scheduler import LambdaLR\n",
- "\n",
- "lr = 0.0003\n",
- "epochs = 25\n",
- "warmup = 5\n",
- "\n",
- "loss_fn = nn.BCELoss()\n",
+ "        x = self.heads[task_name](x)\n",
+ "        return x\n",
+ "\n",
+ "    def freeze_trunk(self):\n",
+ "        self.trunk_frozen = True\n",
+ "        for param in self.dense1.parameters():\n",
+ "            param.requires_grad = False\n",
+ "        for param in self.dense2.parameters():\n",
+ "            param.requires_grad = False\n",
+ "        for param in self.bn1.parameters():\n",
+ "            param.requires_grad = False\n",
+ "        for param in self.bn2.parameters():\n",
+ "            param.requires_grad = False\n",
+ "\n",
+ "    def unfreeze_trunk(self):\n",
+ "        self.trunk_frozen = False\n",
+ "        for param in self.dense1.parameters():\n",
+ "            param.requires_grad = True\n",
+ "        for param in self.dense2.parameters():\n",
+ "            param.requires_grad = True\n",
+ "        for param in self.bn1.parameters():\n",
+ "            param.requires_grad = True\n",
+ "        for param in self.bn2.parameters():\n",
+ "            param.requires_grad = True\n",
+ "\n",
+ "\n",
+ "def model_factory(task_names, lr=3e-3, epochs=25, warmup=5, weight_decay=1e-4):\n",
+ "    model = MultiTaskModel(task_names=task_names)\n",
+ "    optimiser = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)\n",
 "\n",
- "def model_factory():\n",
- "    model = TaskHead()\n",
- "    optimiser = optim.Adam(model.parameters(), lr=lr, weight_decay=0.0001)\n",
- "    \n",
 "    def lr_fn(epoch):\n",
 "        if epoch < warmup: return epoch / warmup\n",
 "        else: return (1 + math.cos(math.pi * (epoch - warmup) / (epochs - warmup))) / 2\n",
 "\n",
 "    lr_scheduler = LambdaLR(optimiser, lr_lambda=lr_fn)\n",
- "    return model, optimiser, lr_scheduler"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "For evaluation we will use both the AUROC and Average Precision metrics. The reported loss is an average across all samples in the epoch."
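The factory above pairs Adam with a linear-warmup-plus-cosine-decay schedule via `LambdaLR`. As a quick sanity check, here is a standalone sketch (not part of this diff; it just re-uses the same `lr_fn` with the default `epochs=25`, `warmup=5`) that prints the multiplier applied to the base learning rate:

```python
import math

epochs, warmup = 25, 5

def lr_fn(epoch):
    # linear warmup for the first `warmup` epochs, then cosine decay to zero
    if epoch < warmup:
        return epoch / warmup
    return (1 + math.cos(math.pi * (epoch - warmup) / (epochs - warmup))) / 2

for epoch in (0, 1, 5, 15, 24):
    print(f"epoch {epoch:2d}: lr multiplier = {lr_fn(epoch):.3f}")
# epoch  0: lr multiplier = 0.000
# epoch  1: lr multiplier = 0.200
# epoch  5: lr multiplier = 1.000
# epoch 15: lr multiplier = 0.500
# epoch 24: lr multiplier = 0.006
```

Note that the multiplier is 0 at epoch 0, so the first scheduled epoch effectively trains with a zero learning rate; that is a quirk of this schedule worth keeping in mind when deciding where to step the scheduler.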
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 25,
- "metadata": {},
- "outputs": [],
- "source": [
- "import torch\n",
- "from sklearn.metrics import roc_auc_score, average_precision_score\n",
+ "    return model, optimiser, lr_scheduler\n",
 "\n",
- "def evaluate(predictor, dataloader, loss_fn):\n",
+ "\n",
+ "def evaluate(predictor, task, eval_type='val'):\n",
 "    predictor.eval()\n",
 "    total_loss = 0\n",
- "    all_probs = []\n",
- "    all_targets = []\n",
+ "\n",
+ "    dataloader = task.val_loader if eval_type == 'val' else task.test_loader\n",
 "\n",
 "    with torch.no_grad():\n",
 "        for inputs, targets in dataloader:\n",
- "            probs = torch.sigmoid(predictor(inputs).squeeze())\n",
- "            loss = loss_fn(probs, targets)\n",
+ "            logits = predictor(inputs, task_name=task.name).squeeze()\n",
+ "            loss = task.get_loss(logits, targets)\n",
 "            total_loss += loss.item()\n",
- "            all_probs.extend(probs.tolist())\n",
- "            all_targets.extend(targets.tolist())\n",
 "\n",
- "    loss = total_loss / len(all_probs)\n",
+ "    loss = total_loss / len(dataloader)\n",
 "    \n",
- "    return (\n",
- "        loss,\n",
- "        roc_auc_score(all_targets, all_probs),\n",
- "        average_precision_score(all_targets, all_probs)\n",
- "    )"
+ "    return loss\n",
+ "\n",
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "Training is a rather standard boilerplate loop:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 26,
- "metadata": {},
- "outputs": [],
- "source": [
- "def train_one_epoch(predictor, train_loader, val_loader, optimiser, lr_scheduler, loss_fn, epoch, eval=True):\n",
- "    predictor.train() \n",
- "    train_loss = 0\n",
- "    \n",
- "    lr_scheduler.step(epoch)\n",
- "    \n",
- "    for inputs, targets in train_loader:\n",
- "        optimiser.zero_grad()\n",
- "        probs = torch.sigmoid(predictor(inputs).squeeze())\n",
- "        loss = loss_fn(probs, targets)\n",
- "        loss.backward()\n",
- "        optimiser.step()\n",
- "        train_loss += loss.item()\n",
 "\n",
- "    train_loss /= (len(train_loader) * train_loader.batch_size)\n",
- "\n",
- "    if eval:\n",
- "        val_loss, auroc, avpr = evaluate(predictor, val_loader, loss_fn)\n",
- "        print(\n",
- "            f\"## Epoch {epoch+1}\\t\"\n",
- "            f\"train_loss: {train_loss:.4f}\\t\"\n",
- "            f\"val_loss: {val_loss:.4f}\\t\"\n",
- "            f\"val_auroc: {auroc:.4f}\\t\"\n",
- "            f\"val_avpr: {avpr:.4f}\"\n",
- "        )\n",
- "    return predictor"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "And now, let's see how good our model gets after training... 
🚀" - ] - }, - { - "cell_type": "code", - "execution_count": 27, - "metadata": {}, - "outputs": [ - { - "name": "stdout", - "output_type": "stream", - "text": [ - "## Epoch 0\ttrain_loss: ------\tval_loss: 0.0132\tval_auroc: 0.6979\tval_avpr: 0.9076\n", - "## Epoch 1\ttrain_loss: 0.0208\tval_loss: 0.0131\tval_auroc: 0.6479\tval_avpr: 0.8884\n", - "## Epoch 2\ttrain_loss: 0.0183\tval_loss: 0.0102\tval_auroc: 0.7792\tval_avpr: 0.9384\n", - "## Epoch 3\ttrain_loss: 0.0126\tval_loss: 0.0069\tval_auroc: 0.9208\tval_avpr: 0.9792\n", - "## Epoch 4\ttrain_loss: 0.0077\tval_loss: 0.0052\tval_auroc: 0.9542\tval_avpr: 0.9893\n", - "## Epoch 5\ttrain_loss: 0.0052\tval_loss: 0.0042\tval_auroc: 0.9667\tval_avpr: 0.9927\n", - "## Epoch 6\ttrain_loss: 0.0037\tval_loss: 0.0038\tval_auroc: 0.9708\tval_avpr: 0.9938\n", - "## Epoch 7\ttrain_loss: 0.0026\tval_loss: 0.0037\tval_auroc: 0.9562\tval_avpr: 0.9899\n", - "## Epoch 8\ttrain_loss: 0.0022\tval_loss: 0.0034\tval_auroc: 0.9604\tval_avpr: 0.9909\n", - "## Epoch 9\ttrain_loss: 0.0015\tval_loss: 0.0037\tval_auroc: 0.9542\tval_avpr: 0.9887\n", - "## Epoch 10\ttrain_loss: 0.0011\tval_loss: 0.0029\tval_auroc: 0.9771\tval_avpr: 0.9951\n", - "## Epoch 11\ttrain_loss: 0.0010\tval_loss: 0.0027\tval_auroc: 0.9833\tval_avpr: 0.9965\n", - "## Epoch 12\ttrain_loss: 0.0007\tval_loss: 0.0026\tval_auroc: 0.9833\tval_avpr: 0.9966\n", - "## Epoch 13\ttrain_loss: 0.0006\tval_loss: 0.0030\tval_auroc: 0.9792\tval_avpr: 0.9955\n", - "## Epoch 14\ttrain_loss: 0.0008\tval_loss: 0.0031\tval_auroc: 0.9771\tval_avpr: 0.9951\n", - "## Epoch 15\ttrain_loss: 0.0005\tval_loss: 0.0027\tval_auroc: 0.9771\tval_avpr: 0.9951\n", - "## Epoch 16\ttrain_loss: 0.0006\tval_loss: 0.0026\tval_auroc: 0.9813\tval_avpr: 0.9960\n", - "## Epoch 17\ttrain_loss: 0.0006\tval_loss: 0.0028\tval_auroc: 0.9792\tval_avpr: 0.9955\n", - "## Epoch 18\ttrain_loss: 0.0005\tval_loss: 0.0026\tval_auroc: 0.9813\tval_avpr: 0.9960\n", - "## Epoch 19\ttrain_loss: 0.0005\tval_loss: 0.0025\tval_auroc: 0.9813\tval_avpr: 0.9960\n", - "## Epoch 20\ttrain_loss: 0.0005\tval_loss: 0.0026\tval_auroc: 0.9813\tval_avpr: 0.9960\n", - "## Epoch 21\ttrain_loss: 0.0004\tval_loss: 0.0027\tval_auroc: 0.9792\tval_avpr: 0.9955\n", - "## Epoch 22\ttrain_loss: 0.0004\tval_loss: 0.0027\tval_auroc: 0.9813\tval_avpr: 0.9960\n", - "## Epoch 23\ttrain_loss: 0.0004\tval_loss: 0.0028\tval_auroc: 0.9750\tval_avpr: 0.9946\n", - "## Epoch 24\ttrain_loss: 0.0004\tval_loss: 0.0027\tval_auroc: 0.9792\tval_avpr: 0.9955\n", - "## Epoch 25\ttrain_loss: 0.0004\tval_loss: 0.0026\tval_auroc: 0.9813\tval_avpr: 0.9960\n", - "test_loss: 0.0015\n", - "test_auroc: 0.9951\n", - "test_avpr: 0.9986\n" - ] - } - ], - "source": [ - "model, optimiser, lr_scheduler = model_factory()\n", - "\n", - "val_loss, val_auroc, val_avpr = evaluate(model, val_loader, loss_fn)\n", - "print(\n", - " f\"## Epoch 0\\t\"\n", - " f\"train_loss: ------\\t\"\n", - " f\"val_loss: {val_loss:.4f}\\t\"\n", - " f\"val_auroc: {val_auroc:.4f}\\t\"\n", - " f\"val_avpr: {val_avpr:.4f}\"\n", - ")\n", - "\n", - "for epoch in range(epochs):\n", - " model = train_one_epoch(model, train_loader, val_loader, optimiser, lr_scheduler, loss_fn, epoch)\n", - "\n", - "test_loss, test_auroc, test_avpr = evaluate(model, test_loader, loss_fn)\n", - "print(\n", - " f\"test_loss: {test_loss:.4f}\\n\"\n", - " f\"test_auroc: {test_auroc:.4f}\\n\"\n", - " f\"test_avpr: {test_avpr:.4f}\"\n", - ")" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Running on a server-grade machine with 128 CPUs, 
the training took just 1.6s, reaching an AUROC of 0.9951 on the test set. As of summer 2024, this is better than the SoTA of 0.989. Pretty good!"
- ]
- },
- {
- "cell_type": "markdown",
- "metadata": {},
- "source": [
- "## Step 4: Improvements\n",
 "\n",
- "The result can be further improved. One problem is that the accuracy is quite sensitive to both the train-val splitting (reminder: we use a scaffold splitting strategy) and the weight initialisation. Let's visualise the distribution of validation scores by training a few models:"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 29,
- "metadata": {},
- "outputs": [],
- "source": [
- "def dataloader_factory(seed):\n",
- "    mols_train, mols_val = admet.get_train_valid_split(benchmark=DATASET_NAME, split_type='scaffold', seed=seed)\n",
+ "def evaluate_ensemble(predictors, dataloader, task):\n",
+ "    predictions = []\n",
+ "    with torch.no_grad():\n",
+ "        \n",
+ "        for inputs, _ in dataloader:\n",
+ "            ensemble_logits = [predictor(inputs, task_name=task.name).squeeze() for predictor in predictors]\n",
+ "            averaged_logits = torch.mean(torch.stack(ensemble_logits), dim=0)\n",
+ "            if task.task == 'classification':\n",
+ "                predictions += torch.sigmoid(averaged_logits).tolist()\n",
+ "            else:\n",
+ "                predictions += averaged_logits.tolist()\n",
 "\n",
- "    mols_val['Embedding'] = featuriser(list(mols_val['Drug']))\n",
- "    mols_train['Embedding'] = featuriser(list(mols_train['Drug']))\n",
+ "    return predictions\n",
 "\n",
- "    val_loader = DataLoader(AdmetDataset(mols_val), batch_size=128, shuffle=False)\n",
- "    train_loader = DataLoader(AdmetDataset(mols_train), batch_size=32, shuffle=True)\n",
 "\n",
- "    return val_loader, train_loader"
- ]
- },
- {
- "cell_type": "code",
- "execution_count": 31,
- "metadata": {},
- "outputs": [
- {
- "data": {
- "image/png": "<base64 PNG data omitted: histogram titled 'Distribution of AUROC results on validation split', x-axis AUROC, y-axis Frequency>",
"iVBORw0KGgoAAAANSUhEUgAAAioAAAHHCAYAAACRAnNyAAAAOXRFWHRTb2Z0d2FyZQBNYXRwbG90bGliIHZlcnNpb24zLjkuMSwgaHR0cHM6Ly9tYXRwbG90bGliLm9yZy/TGe4hAAAACXBIWXMAAA9hAAAPYQGoP6dpAAA/jUlEQVR4nO3dd3gU5f7//9eSkA0t1IQQSkJJgBCkCnpA6b0LGhCkq0dBQEAUvA6CStMDggUQD4QuvfgBBEM/Khw6KkivSglSEmogyf39g1/255IEkiXJDvB8XNdeMLP3zLzn3vbKzD27NmOMEQAAgAVlcXcBAAAAKSGoAAAAyyKoAAAAyyKoAAAAyyKoAAAAyyKoAAAAyyKoAAAAyyKoAAAAyyKoAAAAyyKoPMKGDRsmm82WKduqXbu2ateu7ZjeuHGjbDabFi1alCnb79q1q4KCgjJlW666du2aevbsKX9/f9lsNvXr18/dJSENgoKC1LVrV3eXYUn39k3i63/jxo0PXPbe9470kJnvfRnp3r45ceKEbDabpk+f7raarIigYhHTp0+XzWZz3Ly9vRUQEKBGjRrp888/19WrV9NlO2fOnNGwYcO0Z8+edFlferJybakxcuRITZ8+XW+88YZmzZqlV1555YHLxMfHKyAgQDabTd9//32ybbp27aqcOXOmuI6cOXMm+yGSePPw8JCfn5/atWun33//PcX1rFixQo0bN1b+/Pnl7e2tkJAQDRw4UBcvXkxxmY0bN+qFF16Qv7+/vLy85OfnpxYtWmjJkiUP3Her279/v4YNG6YTJ064u5Qn0o0bNzRs2LBUhaHH2apVqzRs2DB3l+FeBpYQERFhJJkPP/zQzJo1y0ybNs2MHDnSNGzY0NhsNhMYGGj27t3rtMydO3fMzZs307Sd7du3G0kmIiIiTcvFxsaa2NhYx/SGDRuMJLNw4cI0rcfV2m7fvm1u3bqVbtvKCNWrVzc1atRI0zI//PCDkWSCgoJMx44dk23TpUsXkyNHjhTXkSNHDtOlSxfHdOJj06dPH8dzqV+/fsbb29vkz5/fnD17Nsk6BgwYYCSZChUqmDFjxphvvvnGvPHGG8Zut5vChQubAwcOJFlm6NChRpIJDg42Q4cONVOnTjWffPKJqV27tpFk5syZk6a+cLfAwECnfly4cKGRZDZs2OC2mqzi3r6Jj483N2/eNPHx8Q9ctlatWqZWrVpp3uaFCxeMJPPBBx8kuc+V9z4rurdvEhISzM2bN01cXJxjXq9evcyT/lHt6Z54hJQ0adJEVatWdUwPHjxY69evV/PmzdWyZUv9/vvvypYtmyTJ09NTnp4Z+xDeuHFD2bNnl5eXV4Zu50GyZs3q1u2nRlRUlEJDQ9O0zOzZs1W5cmV16dJFQ4YM0fXr15UjR450qee5555Tu3btHNOlS5fWG2+8oZkzZ2rQoEGO+d9++63Gjh2r8PBwzZkzRx4eHo77unbtqjp16ujFF1/Url27HM+3RYsW6cMPP1S7du00d+5cp8fnnXfe0Zo1a3Tnzp001ZuQkKDbt2/L29vb1V1GJsmSJYtbH6fMeO9zh8Sj6XDGqZ9HQN26dfWvf/1LJ0+e1OzZsx3zkztPGxkZqZo1aypPnjzKmTOnSpcurSFDhki6e5j+6aefliR169bNcWog8Xxo7dq1FRYWpp07d+r5559X9uzZHcumdJ45Pj5eQ4YMkb+/v3LkyKGWLVvq9OnTTm1SOvf/93U+qLbkxqhcv35dAwYMUNGiRWW321W6dGn9+9//lrnnB8FtNpt69+6tZcuWKSwsTHa7XeXKldPq1auT7/B7REVFqUePHipYsKC8vb1VoUIFzZgxw3F/4qmW48ePa+XKlY7aH3TK4ObNm1q6dKnat2+vl156STdv3tTy5ctTVZMrnnvuOUnS0aNHneYPHz5cefPm1ZQpU5xCiiRVq1ZN7777rn799Ven8Uj/+te/lC9fPk2bNi3ZENmoUSM1b978vvUkPi5z5sxRuXLlZLfbHY/Jn3/+qe7du6tgwYKOx2vatGlJ1vHFF1+oXLlyyp49u/LmzauqVatq7ty5jvtTGtv0oDEO06dP14svvihJqlOnjuMxTTwNsWPHDjVq1EgFChRQtmzZVLx4cXXv3v2++5to4sSJjv0NCAhQr169dOXKFac2ia/F/fv3q06dOsqePbsKFy6sTz755IHrDwsLU506dZLMT0hIUOHChZ3C67///W/94x//UP78+ZUtWzZVqVIlVePOUhqjMmXKFJUsWVLZsmVTtWrV9N///jfJsrdv39bQoUNVpUoV5c6dWzly5NBzzz2nDRs2ONqcOHFCvr6+ku4+PxP7P/EUSHKPX1xcnD766COVLFlSdrtdQUFBGjJkiGJjY53aBQUFqXnz5vrxxx9VrVo1eXt7q0SJEpo5c+YD91uS5s2bpypVqihXrlzy8fFR+fLlNWHCBMf9iafxN2/erNdff1358+eXj4+POnfurMuXL9933feOUenatau++uorSXI6nfukIag8IhLHO/zwww8pttm3b5+aN2+u2NhYffjhhxo7dqxatmypn376SZJUtmxZffjhh5Kk1157TbNmzdKsWbP0/PPPO9Zx8eJFNWnSRBUrVtT48eOTfcP7uxEjRmjlypV699131adPH0VGRqp+/fq6efNmmvYvNbX9nTFGLVu21GeffabGjRtr3LhxKl26tN555x31798/Sfsff/xRb775ptq3b69PPvlEt27dUtu2be87/kK6GyZq166tWbNmqWPHjvr000+VO3dude3a1fHmVLZsWc2aNUsFChRQxYoVHbUnvtGm5LvvvtO1a9fUvn17+fv7q3bt2pozZ05qusslicEpb968jnmHDx/WwYMH1apVK/n4+CS7XOfOnSXdHcOSuMyBAwfUunVr5cqV66FqWr9+vd5++22Fh4drwoQJCgoK0vnz5/XMM89o7dq16t27tyZMmKBSpUqpR48eGj9+vGPZb775Rn369FFoaKjGjx+v4cOHq2LFivrf//73UDVJ0vPPP68+ffpIkoYMGeJ4TMuWLauoqCg1bNhQJ06c0HvvvacvvvhCHTt21NatWx+43mHDhqlXr14KCAjQ2LFj1bZtW3399ddq2LBhkiNQly9fVuPGjVWhQgWNHTtWZcqU0bvvvpviWKZE4eHh2rx5s86dO+c0/8cff9SZM2fUvn17x7wJEyaoUqVK+vDDDzVy5Eh5enrqxRdf1MqVK1PbVQ5Tp07V66+/Ln9/f33yySeqUaNGsn+4xMTE6D//+Y9q166tMWPGaNiwYbpw4YIaNWrkGJ/m6+urSZMmSZLatGnj6P8XXnghxe337NlTQ4cOVeXKlfXZZ5+pVq1aGjVqlNP+Jjpy5IjatWunBg0aaOzYscqbN6+6du2qffv23XcfIyMj1aFDB+XNm1djxozR6NGjVbt2bcd77N/17t1bv//+u4YNG6bOnTtrzpw5at26dZI/pO7n9ddfV4MGDSTJ0QezZs1K9fKPDTefesL/J3GMyvbt21Nskzt3blOpUiXH9AcffO
B07vKzzz4zksyFCxdSXMf9xoHUqlXLSDKTJ09O9r6/n0tNHAdRuHBhExMT45i/YMECI8lMmDDBMe/e89sprfN+tXXp0sUEBgY6ppctW2YkmY8//tipXbt27YzNZjNHjhxxzJNkvLy8nObt3bvXSDJffPFFkm393fjx440kM3v2bMe827dvm2effdbkzJnTad8DAwNNs2bN7ru+v2vevLnTmJYpU6YYT09PExUV5dTO1TEq06ZNMxcuXDBnzpwxq1evNqVKlTI2m81s27bN0TaxHz/77LP71urj42MqV65sjDFm+fLlqVrmQSSZLFmymH379jnN79GjhylUqJD566+/nOa3b9/e5M6d29y4ccMYY0yrVq1MuXLl7ruNe583ie597RiT+jEqS5cufeBrNTlRUVHGy8vLNGzY0Glsx5dfful4vBIlvhZnzpzpmBcbG2v8/f1N27Zt77udgwcPJvvcfvPNN03OnDkd/WeMcfq/MXef22FhYaZu3bpO8+/tm8TnWGLf3L592/j5+ZmKFSs6jWWbMmWKkeT0Oo+Li3NqY4wxly9fNgULFjTdu3d3zLvfGJV7H789e/YYSaZnz55O7QYOHGgkmfXr1zvtiySzefNmx7yoqChjt9vNgAEDkmzr7/r27Wt8fHycxpDcK/G9vEqVKub27duO+Z988omRZJYvX+6Yd+974PHjx5O8BzJGxRiOqDxCcubMed+rf/LkySNJWr58uRISElzaht1uV7du3VLdvnPnzk5/Vbdr106FChXSqlWrXNp+aq1atUoeHh6Ov3oTDRgwQMaYJH911q9fXyVLlnRMP/XUU/Lx8dGxY8ceuB1/f3916NDBMS9r1qzq06ePrl27pk2bNrlU/8WLF7VmzRqn9bZt21Y2m00LFixwaZ336t69u3x9fRUQEKDGjRsrOjpas2bNcpxik+R4Pj3oyEiuXLkUExMjSY5/H/ZoiiTVqlXLaVyPMUaLFy9WixYtZIzRX3/95bg1atRI0dHR2rVrl6S7z/c//vhD27dvf+g60iLxdbZixYo0jcNZu3atbt++rX79+ilLlv//rffVV1+Vj49PkqMYOXPmVKdOnRzTXl5eqlat2gOfsyEhIapYsaLmz5/vmBcfH69FixapRYsWjjFukpz+f/nyZUVHR+u5555z9HFq7dixQ1FRUfrnP//pNJ6ta9euyp07t1NbDw8PR5uEhARdunRJcXFxqlq1apq3myjx/ebeo6kDBgyQpCR9Gxoa6jgVKt09glO6dOkH9m2ePHl0/fp1RUZGPrCm1157zem06BtvvCFPT88Mf298HBFUHiHXrl2774dDeHi4atSooZ49e6pgwYJq3769FixYkKbQUrhw4TQNnA0ODnaattlsKlWqVIZf0nny5EkFBAQk6Y+yZcs67v+7YsWKJVlH3rx5H3jO+OTJkwoODnb6YLnfdlJr/vz5unPnjipVqqQjR47oyJEjunTpkqpXr+7S6Z/kzlsPHTpUkZGRWrp0qTp37qzo6Ogk+5HYfw+6/P3q1auOtomniNLjkvnixYs7TV+4cEFXrlzRlClT5Ovr63RLDNBRUVGSpHfffVc5c+ZUtWrVFBwcrF69eiV7CD691apVS23bttXw4cNVoEABtWrVShEREUnGQtwr8blSunRpp/leXl4qUaJEkudSkSJFkjyuqXnOSnffC3766Sf9+eefku6OKYmKilJ4eLhTuxUrVuiZZ56Rt7e38uXL5zjlEh0d/cBtJLdv974fZM2aVSVKlEjSfsaMGXrqqafk7e2t/Pnzy9fXVytXrkzzdv++/SxZsqhUqVJO8/39/ZUnT550ez948803FRISoiZNmqhIkSLq3r17imPd7u2LnDlzqlChQlzu7gKCyiPijz/+UHR0dJIX4t9ly5ZNmzdv1tq1a/XKK6/ol19+UXh4uBo0aKD4+PhUbefvf2Gll5QGf6W2pvRw7yDRRCYN54vTU2IYqVGjhoKDgx23H3/8UVu2bHH6y87b21uxsbHJ1mqM0a1bt5K9UqB8+fKqX7++WrdurRkzZqhly5Z69dVXncYMJAauX375JcVaT548qZiYGMeRjzJlykiSfv31Vxf23Nm9z7fEUN2pUydFRkYme6tRo4aj9oMHD2revHmqWbOmFi9erJo1a+qDDz5wrC8jnnuJX3S4ZcsW9e7d2zHwt0qVKrp27ZrL673Xwzxnw8PDZYzRwoULJUkLFixQ7ty51bhxY0eb//73v2rZsqW8vb01ceJErVq1SpGRkXr55Zcz9HUxe/Zsde3aVSVLltTUqVO1evVqRUZGqm7dui4fCU6U2oGmrvatn5+f9uzZo++++04tW7bUhg0b1KRJE3Xp0iXNtSL1CCqPiMQBVI0aNbpvuyxZsqhevXoaN26c9u/frxEjRmj9+vWOEfXpPWL88OHDTtPGGB05csTpSou8efMmuapBSno0Ii21BQYG6syZM0n+qj9w4IDj/vQQGBiow4cPJ3kDfZjtHD9+XD///LN69+6thQsXOt3mz58vLy8vpytXAgMDFRcXl+RqHenuoMD4+PhU1TF69GjdunVLI0aMcMwLCQlRSEiIli1bluIRksSrIRKv4gkJCVHp0qW1fPnydP1glu4egs+VK5fi4+NVv379ZG9+fn6O9jly5FB4eLgiIiJ06tQpNWvWTCNGjNCtW7ckpf65l5wHPR+feeYZjRgxQjt27NCcOXO0b98+zZs3L8X2iY/RwYMHnebfvn1bx48fT7fnrHT3SFW1atU0f/58xcXFacmSJWrdurXsdrujzeLFi+Xt7a01a9aoe/fuatKkierXr+/S9hJrv/f94M6dOzp+/LjTvEWLFqlEiRJasmSJXnnlFTVq1Ej169d3PGaJ0vp+kJCQkGT758+f15UrV9K1b728vNSiRQtNnDhRR48e1euvv66ZM2fqyJEjTu3ureXatWs6e/Zsmr9h+0m8yudeBJVHwPr16/XRRx+pePHi6tixY4rtLl26lGRexYoVJclxWDrxOzqSe/N2xcyZM50+4BYtWqSzZ8+qSZMmjnklS5bU1q1bdfv2bce8FStWJLkaIC21NW3aVPHx8fryyy+d5n/22Wey2WxO238YTZs21blz55zO98fFxemLL75Qzpw5VatWrTSvM/FoyqBBg9SuXTun20svvaRatWo5nf5J3Jd791WS49LF1OxvyZIl1bZtW02fPt3pipChQ4fq8uXL+uc//5nkSMPOnTs1ZswYhYWFqW3bto75w4cP18WLF9WzZ0/FxcUl2dYPP/zguEooLTw8PNS2bVstXrxYv/32W5L7L1y44Pj/vVdseXl5KTQ0VMYYx9iRkiVLKjo62umI0dmzZ7V06dIH1pLS8/Hy5ctJ/vK+93WWnPr168vLy0uff/650/JTp05VdHS0mjVr9sCa0iI8PFxbt27VtGnT9NdffyU57ePh4SGbzeb0mJ84cULLli1L87aqVq0qX19fTZ482el1Pn369CT9l3g04+998L///U9btmxxapc9e3ZJqX8/kOR0VZgkjRs3TpLSrW/vfc5lyZJFT
z31lKSkj/2UKVOcxjBNmjRJcXFxaX5vSu/37EfR4/eNOY+477//XgcOHFBcXJzOnz+v9evXKzIyUoGBgfruu+/u+2VAH374oTZv3qxmzZopMDBQUVFRmjhxoooUKaKaNWtKuvvGnSdPHk2ePFm5cuVSjhw5VL169SRjBVIrX758qlmzprp166bz589r/PjxKlWqlF599VVHm549e2rRokVq3LixXnrpJR09elSzZ892Gtya1tpatGihOnXq6P3339eJEydUoUIF/fDDD1q+fLn69euXZN2ueu211/T111+ra9eu2rlzp4KCgrRo0SL99NNPGj9+vEsDSufMmaOKFSuqaNGiyd7fsmVLvfXWW9q1a5cqV66sihUrqmfPnpowYYIOHz7suFwxMjJSq1atUs+ePVWhQoVUbfudd97RggULNH78eI0ePVqS1LFjR23fvl0TJkzQ/v371bFjR+XNm1e7du3StGnTlD9/fi1atMhpYGB4eLh+/fVXjRgxQrt371aHDh0UGBioixcvavXq1Vq3bp3TUaG0GD16tDZs2KDq1avr1VdfVWhoqC5duqRdu3Zp7dq1jkDesGFD+fv7q0aNGipYsKB+//13ffnll2rWrJnjcWnfvr3effddtWnTRn369NGNGzc0adIkhYSEPHDgZsWKFeXh4aExY8YoOjpadrtddevW1dy5czVx4kS1adNGJUuW1NWrV/XNN9/Ix8fH8YGZHF9fXw0ePFjDhw9X48aN1bJlSx08eFATJ07U008/7TRwNj289NJLGjhwoAYOHKh8+fIlOVrSrFkzjRs3To0bN9bLL7+sqKgoffXVVypVqtR9TwUmJ2vWrPr444/1+uuvq27dugoPD9fx48cVERGRZIxK8+bNtWTJErVp00bNmjXT8ePHNXnyZIWGhjodocuWLZtCQ0M1f/58hYSEKF++fAoLC1NYWFiS7VeoUEFdunTRlClTdOXKFdWqVUvbtm3TjBkz1Lp16wd+zUJq9ezZU5cuXVLdunVVpEgRnTx5Ul988YUqVqzoOI2a6Pbt26pXr55eeuklx+Ncs2ZNtWzZMk3brFKliiSpT58+atSokTw8PJK95Pqx5oYrjZCMxEvaEm9eXl7G39/fNGjQwEyYMMHpMthE916it27dOtOqVSsTEBBgvLy8TEBAgOnQoYM5dOiQ03LLly83oaGhxtPT0+lSuFq1aqV4uWdKlyd/++23ZvDgwcbPz89ky5bNNGvWzJw8eTLJ8mPHjjWFCxc2drvd1KhRw+zYsSPZr9ZOqbbkLjO9evWqefvtt01AQIDJmjWrCQ4ONp9++qlJSEhwaifJ9OrVK0lNKV02fa/z58+bbt26mQIFChgvLy9Tvnz5ZC+hTs3lyTt37jSSzL/+9a8U25w4ccJIMm+//bZjXnx8vJkwYYKpUKGC8fb2Nt7e3qZChQrm888/T/I15g/6eYPatWsbHx8fc+XKFaf5y5YtMw0aNDB58+Y1drvdlCpVygwYMOC+l7snPuf8/PyMp6en8fX1NS1atHC6BDMlKT0uxtzt8169epmiRYuarFmzGn9/f1OvXj0zZcoUR5uvv/7aPP/88yZ//vzGbrebkiVLmnfeecdER0c7reuHH34wYWFhxsvLy5QuXdrMnj07VZcnG2PMN998Y0qUKGE8PDwcl+Pu2rXLdOjQwRQrVszY7Xbj5+dnmjdvbnbs2PHAfTbm7uXIZcqUMVmzZjUFCxY0b7zxhrl8+bJTm5Reiyldbp2SGjVqJHvZbqKpU6ea4OBgY7fbTZkyZUxERESq+ubey5MTTZw40RQvXtzY7XZTtWpVs3nz5mS/Jn7kyJEmMDDQ2O12U6lSJbNixYpk9+3nn382VapUMV5eXk6XKidX4507d8zw4cNN8eLFTdasWU3RokXN4MGDk/z0Rkqv09R81f+iRYtMw4YNjZ+fn/Hy8jLFihUzr7/+utPPUiS+l2/atMm89tprJm/evCZnzpymY8eO5uLFi/fdZnKXJ8fFxZm33nrL+Pr6GpvN9kReqmwzxk2jCQEAeMxMnz5d3bp10/bt251+DgWuY4wKAACwLIIKAACwLIIKAACwLMaoAAAAy+KICgAAsCyCCgAAsKxH+gvfEhISdObMGeXKlYuvGQYA4BFhjNHVq1cVEBCQ5MdS7/VIB5UzZ86k+O2eAADA2k6fPq0iRYrct80jHVQSvyb79OnTjp+eBwAA1hYTE6OiRYum6mdIHumgkni6x8fHh6ACAMAjJjXDNhhMCwAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALMutQSUoKEg2my3JrVevXu4sCwAAWIRbf+tn+/btio+Pd0z/9ttvatCggV588UU3VgUAAKzCrUHF19fXaXr06NEqWbKkatWq5aaKAACAlVhmjMrt27c1e/Zsde/ePVW/pggAAB5/bj2i8nfLli3TlStX1LVr1xTbxMbGKjY21jEdExOTCZUBAAB3sUxQmTp1qpo0aaKAgIAU24waNUrDhw/PxKoAALCuoPdWZvg2ToxuluHbuB9LnPo5efKk1q5dq549e9633eDBgxUdHe24nT59OpMqBAAA7mCJIyoRERHy8/NTs2b3T212u112uz2TqgIAAO7m9iMqCQkJioiIUJcuXeTpaYncBAAALMLtQWXt2rU6deqUunfv7u5SAACAxbj9EEbDhg1ljHF3GQAAwILcfkQFAAAgJQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWQQVAABgWW4PKn/++ac6deqk/PnzK1u2bCpfvrx27Njh7rIAAIAFeLpz45cvX1aNGjVUp04dff/99/L19dXhw4eVN29ed5YFAAAswq1BZcyYMSpatKgiIiIc84oXL+7GigAAgJW49dTPd999p6pVq+rFF1+Un5+fKlWqpG+++SbF9rGxsYqJiXG6AQCAx5dbj6gcO3ZMkyZNUv/+/TVkyBBt375dffr0kZeXl7p06ZKk/ahRozR8+HA3VAoAeFwEvbfS3SUgDWzGGOOujXt5ealq1ar6+eefHfP69Omj7du3a8uWLUnax8bGKjY21jEdExOjokWLKjo6Wj4+PplSMwDg0UZQSZsTo5ul+zpjYmKUO3fuVH1+u/XUT6FChRQaGuo0r2zZsjp16lSy7e12u3x8fJxuAADg8eXWoFKjRg0dPHjQad6hQ4cUGBjopooAAICVuDWovP3229q6datGjhypI0eOaO7cuZoyZYp69erlzrIA
AIBFuDWoPP3001q6dKm+/fZbhYWF6aOPPtL48ePVsWNHd5YFAAAswq1X/UhS8+bN1bx5c3eXAQAALMjtX6EPAACQEoIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLIIKAACwLLcGlWHDhslmszndypQp486SAACAhXi6u4By5cpp7dq1jmlPT7eXBAAALMLtqcDT01P+/v7uLgMAAFiQ28eoHD58WAEBASpRooQ6duyoU6dOpdg2NjZWMTExTjcAAPD4cmtQqV69uqZPn67Vq1dr0qRJOn78uJ577jldvXo12fajRo1S7ty5HbeiRYtmcsUAACAz2Ywxxt1FJLpy5YoCAwM1btw49ejRI8n9sbGxio2NdUzHxMSoaNGiio6Olo+PT2aWCgB4RAW9t9LdJTxSToxulu7rjImJUe7cuVP1+e32MSp/lydPHoWEhOjIkSPJ3m+322W32zO5KgAA4C5uH6Pyd9euXdPRo0dVqFAhd5cCAAAswK1BZeDAgdq0aZNOnDihn3/+WW3atJGHh4c6dOjgzrIAAIBFuPXUzx9//KEOHTro4sWL8vX1Vc2aNbV161b5+vq6sywAAGARbg0q8+bNc+fmAQCAxVlqjAoAAMDfEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBlEVQAAIBluRRUjh07lt51AAAAJOFSUClVqpTq1Kmj2bNn69atW+ldEwAAgCQXg8quXbv01FNPqX///vL399frr7+ubdu2pXdtAADgCedSUKlYsaImTJigM2fOaNq0aTp79qxq1qypsLAwjRs3ThcuXEjvOgEAwBPooQbTenp66oUXXtDChQs1ZswYHTlyRAMHDlTRokXVuXNnnT17Nr3qBAAAT6CHCio7duzQm2++qUKFCmncuHEaOHCgjh49qsjISJ05c0atWrVKrzoBAMATyNOVhcaNG6eIiAgdPHhQTZs21cyZM9W0aVNlyXI39xQvXlzTp09XUFBQetYKAACeMC4FlUmTJql79+7q2rWrChUqlGwbPz8/TZ069aGKAwAATzaXgsrhw4cf2MbLy0tdunRxZfUAAACSXByjEhERoYULFyaZv3DhQs2YMeOhiwIAAJBcDCqjRo1SgQIFksz38/PTyJEjH7ooAAAAycWgcurUKRUvXjzJ/MDAQJ06deqhiwIAAJBcDCp+fn765Zdfkszfu3ev8ufP71Iho0ePls1mU79+/VxaHgAAPH5cCiodOnRQnz59tGHDBsXHxys+Pl7r169X37591b59+zSvb/v27fr666/11FNPuVIOAAB4TLkUVD766CNVr15d9erVU7Zs2ZQtWzY1bNhQdevWTfMYlWvXrqljx4765ptvlDdvXlfKAQAAjymXgoqXl5fmz5+vAwcOaM6cOVqyZImOHj2qadOmycvLK03r6tWrl5o1a6b69eu7UgoAAHiMufQ9KolCQkIUEhLi8vLz5s3Trl27tH379lS1j42NVWxsrGM6JibG5W0DAADrcymoxMfHa/r06Vq3bp2ioqKUkJDgdP/69esfuI7Tp0+rb9++ioyMlLe3d6q2O2rUKA0fPtyVkl0S9N7KDN/GidHNMnwbAB5/vF/hceVSUOnbt6+mT5+uZs2aKSwsTDabLc3r2Llzp6KiolS5cmXHvPj4eG3evFlffvmlYmNj5eHh4bTM4MGD1b9/f8d0TEyMihYt6souAACAR4BLQWXevHlasGCBmjZt6vKG69Wrp19//dVpXrdu3VSmTBm9++67SUKKJNntdtntdpe3CQAAHi0uBRUvLy+VKlXqoTacK1cuhYWFOc3LkSOH8ufPn2Q+AAB4Mrl01c+AAQM0YcIEGWPSux4AAAAHl46o/Pjjj9qwYYO+//57lStXTlmzZnW6f8mSJS4Vs3HjRpeWAwAAjyeXgkqePHnUpk2b9K4FAADAiUtBJSIiIr3rAAAASMKlMSqSFBcXp7Vr1+rrr7/W1atXJUlnzpzRtWvX0q04AADwZHPpiMrJkyfVuHFjnTp1SrGxsWrQoIFy5cqlMWPGKDY2VpMnT07vOgEAwBPIpSMqffv2VdWqVXX58mVly5bNMb9NmzZat25duhUHAACebC4dUfnvf/+rn3/+OckPEAYFBenPP/9Ml8IAAABcOqKSkJCg+Pj4JPP/+OMP5cqV66GLAgAAkFwMKg0bNtT48eMd0zabTdeuXdMHH3zwUF+rDwAA8HcunfoZO3asGjVqpNDQUN26dUsvv/yyDh8+rAIFCujbb79N7xoBAMATyqWgUqRIEe3du1fz5s3TL7/8omvXrqlHjx7q2LGj0+BaAACAh+FSUJEkT09PderUKT1rAQAAcOJSUJk5c+Z97+/cubNLxQAAAPydS0Glb9++TtN37tzRjRs35OXlpezZsxNUAABAunDpqp/Lly873a5du6aDBw+qZs2aDKYFAADpxuXf+rlXcHCwRo8eneRoCwAAgKvSLahIdwfYnjlzJj1XCQAAnmAujVH57rvvnKaNMTp79qy+/PJL1ahRI10KAwAAcCmotG7d2mnaZrPJ19dXdevW1dixY9OjLgAAANeCSkJCQnrXAQAAkES6jlEBAABITy4dUenfv3+q244bN86VTQAAALgWVHbv3q3du3frzp07Kl26tCTp0KFD8vDwUOXKlR3tbDZb+lQJAACeSC4FlRYtWihXrlyaMWOG8ubNK+nul8B169ZNzz33nAYMGJCuRQIAgCeTS2NUxo4dq1GjRjlCiiTlzZtXH3/8MVf9AACAdONSUImJidGFCxeSzL9w4YKuXr360EUBAABILgaVNm3aqFu3blqyZIn++OMP/fHHH1q8eLF69OihF154Ib1rBAAATyiXxqhMnjxZAwcO1Msvv6w7d+7cXZGnp3r06KFPP/00XQsEAABPLpeCSvbs2TVx4kR9+umnOnr0qCSpZMmSypEjR7oWBwAAnmwP9YVvZ8+e1dmzZxUcHKwcOXLIGJNedQEAALgWVC5evKh69eopJCRETZs21dmzZyVJPXr04NJkAACQblwKKm+//bayZs2qU6dOKXv27I754eHhWr16dboVBwAAnmwujVH54YcftGbNGhUpUsRpfnBwsE6ePJkuhQEAALh0ROX69etOR1ISXbp
0SXa7/aGLAgAAkFwMKs8995xmzpzpmLbZbEpISNAnn3yiOnXqpFtxAADgyebSqZ9PPvlE9erV044dO3T79m0NGjRI+/bt06VLl/TTTz+ld40AAOAJ5dIRlbCwMB06dEg1a9ZUq1atdP36db3wwgvavXu3SpYsmd41AgCAJ1Saj6jcuXNHjRs31uTJk/X+++9nRE0AAACSXDiikjVrVv3yyy8ZUQsAAIATl079dOrUSVOnTk3vWgAAAJy4NJg2Li5O06ZN09q1a1WlSpUkv/Ezbty4dCkOAAA82dIUVI4dO6agoCD99ttvqly5siTp0KFDTm1sNluq1zdp0iRNmjRJJ06ckCSVK1dOQ4cOVZMmTdJSFgAAeEylKagEBwfr7Nmz2rBhg6S7X5n/+eefq2DBgi5tvEiRIho9erSCg4NljNGMGTPUqlUr7d69W+XKlXNpnQAA4PGRpqBy768jf//997p+/brLG2/RooXT9IgRIzRp0iRt3bqVoAIAAFwbo5Lo3uDyMOLj47Vw4UJdv35dzz77bLJtYmNjFRsb65iOiYlJt+0DAADrSVNQsdlsScagpGVMSnJ+/fVXPfvss7p165Zy5syppUuXKjQ0NNm2o0aN0vDhwx9qe3h0Bb230t0lpIsTo5u5uwTAsh6X1znST5pP/XTt2tXxw4O3bt3SP//5zyRX/SxZsiTV6yxdurT27Nmj6OhoLVq0SF26dNGmTZuSDSuDBw9W//79HdMxMTEqWrRoWnYBAAA8QtIUVLp06eI03alTp4cuwMvLS6VKlZIkValSRdu3b9eECRP09ddfJ2lrt9v5dWYAAJ4gaQoqERERGVWHQ0JCgtM4FAAA8OR6qMG0D2vw4MFq0qSJihUrpqtXr2ru3LnauHGj1qxZ486yAACARbg1qERFRalz5846e/ascufOraeeekpr1qxRgwYN3FkWAACwCLcGFX4vCAAA3I9LP0oIAACQGQgqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAsggqAADAstwaVEaNGqWnn35auXLlkp+fn1q3bq2DBw+6syQAAGAhbg0qmzZtUq9evbR161ZFRkbqzp07atiwoa5fv+7OsgAAgEV4unPjq1evdpqePn26/Pz8tHPnTj3//PNuqgoAAFiFW4PKvaKjoyVJ+fLlS/b+2NhYxcbGOqZjYmIypS4AAOAelgkqCQkJ6tevn2rUqKGwsLBk24waNUrDhw/P5MoefUHvrXR3CXgM8bxKvROjm7m7hHTBYw53sMxVP7169dJvv/2mefPmpdhm8ODBio6OdtxOnz6diRUCAIDMZokjKr1799aKFSu0efNmFSlSJMV2drtddrs9EysDAADu5NagYozRW2+9paVLl2rjxo0qXry4O8sBAAAW49ag0qtXL82dO1fLly9Xrly5dO7cOUlS7ty5lS1bNneWBgAALMCtY1QmTZqk6Oho1a5dW4UKFXLc5s+f786yAACARbj91A8AAEBKLHPVDwAAwL0IKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLIIKgAAwLLcGlQ2b96sFi1aKCAgQDabTcuWLXNnOQAAwGLcGlSuX7+uChUq6KuvvnJnGQAAwKI83bnxJk2aqEmTJu4sAQAAWJhbg0paxcbGKjY21jEdExPjxmoAAEBGe6SCyqhRozR8+HB3lwE8lKD3Vrq7BAB4ZDxSV/0MHjxY0dHRjtvp06fdXRIAAMhAj9QRFbvdLrvd7u4yAABAJnmkjqgAAIAni1uPqFy7dk1HjhxxTB8/flx79uxRvnz5VKxYMTdWBgAArMCtQWXHjh2qU6eOY7p///6SpC5dumj69OluqgoAAFiFW4NK7dq1ZYxxZwkAAMDCGKMCAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsi6ACAAAsyxJB5auvvlJQUJC8vb1VvXp1bdu2zd0lAQAAC3B7UJk/f7769++vDz74QLt27VKFChXUqFEjRUVFubs0AADgZm4PKuPGjdOrr76qbt26KTQ0VJMnT1b27Nk1bdo0d5cGAADczK1B5fbt29q5c6fq16/vmJclSxbVr19fW7ZscWNlAADACjzdufG//vpL8fHxKliwoNP8ggUL6sCBA0nax8bGKjY21jEdHR0tSYqJicmQ+hJib2TIev8uo2r/u8zYDwAp43WOR1lGPH8T12mMeWBbtwaVtBo1apSGDx+eZH7RokXdUE36yD3e3RUAyGi8zvEoy8jn79WrV5U7d+77tnFrUClQoIA8PDx0/vx5p/nnz5+Xv79/kvaDBw9W//79HdMJCQm6dOmS8ufPL5vNluH1WlFMTIyKFi2q06dPy8fHx93lPJLow/RBP6YP+jF90I/pI6P60Rijq1evKiAg4IFt3RpUvLy8VKVKFa1bt06tW7eWdDd8rFu3Tr17907S3m63y263O83LkydPJlRqfT4+PrwYHxJ9mD7ox/RBP6YP+jF9ZEQ/PuhISiK3n/rp37+/unTpoqpVq6patWoaP368rl+/rm7durm7NAAA4GZuDyrh4eG6cOGChg4dqnPnzqlixYpavXp1kgG2AADgyeP2oCJJvXv3TvZUDx7Mbrfrgw8+SHJKDKlHH6YP+jF90I/pg35MH1boR5tJzbVBAAAAbuD2b6YFAABICUEFAABYFkEFAABYFkEFAABYFkHFYr766isFBQXJ29tb1atX17Zt21Jse+fOHX344YcqWbKkvL29VaFCBa1evfqh1vm4SO9+HDVqlJ5++mnlyp
VLfn5+at26tQ4ePJjRu+F2GfF8TDR69GjZbDb169cvAyq3jozowz///FOdOnVS/vz5lS1bNpUvX147duzIyN1wu/Tux/j4eP3rX/9S8eLFlS1bNpUsWVIfffRRqn575lG1efNmtWjRQgEBAbLZbFq2bNkDl9m4caMqV64su92uUqVKafr06UnaZPhnjIFlzJs3z3h5eZlp06aZffv2mVdffdXkyZPHnD9/Ptn2gwYNMgEBAWblypXm6NGjZuLEicbb29vs2rXL5XU+DjKiHxs1amQiIiLMb7/9Zvbs2WOaNm1qihUrZq5du5ZZu5XpMqIfE23bts0EBQWZp556yvTt2zeD98R9MqIPL126ZAIDA03Xrl3N//73P3Ps2DGzZs0ac+TIkczarUyXEf04YsQIkz9/frNixQpz/Phxs3DhQpMzZ04zYcKEzNqtTLdq1Srz/vvvmyVLlhhJZunSpfdtf+zYMZM9e3bTv39/s3//fvPFF18YDw8Ps3r1akebzPiMIahYSLVq1UyvXr0c0/Hx8SYgIMCMGjUq2faFChUyX375pdO8F154wXTs2NHldT4OMqIf7xUVFWUkmU2bNqVP0RaUUf149epVExwcbCIjI02tWrUe66CSEX347rvvmpo1a2ZMwRaVEf3YrFkz07179/u2eZylJqgMGjTIlCtXzmleeHi4adSokWM6Mz5jOPVjEbdv39bOnTtVv359x7wsWbKofv362rJlS7LLxMbGytvb22letmzZ9OOPP7q8zkddRvRjcqKjoyVJ+fLlS4eqrScj+7FXr15q1qyZ07ofRxnVh999952qVq2qF198UX5+fqpUqZK++eabjNkJC8iofvzHP/6hdevW6dChQ5KkvXv36scff1STJk0yYC8eTVu2bEnyOm3UqJGj3zPrM4agYhF//fWX4uPjk/x0QMGCBXXu3Llkl2nUqJHGjRunw4cPKyEhQZGRkVqyZInOnj3r8jofdRnRj/dKSEhQv379VKNGDYWFhaX7PlhBRvXjvHnztGvXLo0aNSpD67eCjOrDY8eOadKkSQoODtaaNWv0xhtvqE+fPpoxY0aG7o+7ZFQ/vvfee2rfvr3KlCmjrFmzqlKlSurXr586duyYofvzKDl37lyy/R4TE6ObN29m2mcMQeURNmHCBAUHB6tMmTLy8vJS79691a1bN2XJwsOaFmntx169eum3337TvHnzMrlSa3tQP54+fVp9+/bVnDlzkvy1i7tS81xMSEhQ5cqVNXLkSFWqVEmvvfaaXn31VU2ePNmNlVtLavpxwYIFmjNnjubOnatdu3ZpxowZ+ve///3YBr5HGZ9oFlGgQAF5eHjo/PnzTvPPnz8vf3//ZJfx9fXVsmXLdP36dZ08eVIHDhxQzpw5VaJECZfX+ajLiH78u969e2vFihXasGGDihQpkiH7YAUZ0Y87d+5UVFSUKleuLE9PT3l6emrTpk36/PPP5enpqfj4+Azfr8yUUc/FQoUKKTQ01Gm5smXL6tSpU+m/ExaQUf34zjvvOI6qlC9fXq+88orefvvtJ+JoX2r5+/sn2+8+Pj7Kli1bpn3GEFQswsvLS1WqVNG6desc8xISErRu3To9++yz913W29tbhQsXVlxcnBYvXqxWrVo99DofVRnRj5JkjFHv3r21dOlSrV+/XsWLF8+wfbCCjOjHevXq6ddff9WePXsct6pVq6pjx47as2ePPDw8MnSfMltGPRdr1KiR5NL4Q4cOKTAwMH13wCIyqh9v3LiR5Kiph4eHEhIS0ncHHmHPPvusU79LUmRkpKPfM+0zJt2G5eKhzZs3z9jtdjN9+nSzf/9+89prr5k8efKYc+fOGWOMeeWVV8x7773naL9161azePFic/ToUbN582ZTt25dU7x4cXP58uVUr/NxlBH9+MYbb5jcuXObjRs3mrNnzzpuN27cyOzdyzQZ0Y/3etyv+smIPty2bZvx9PQ0I0aMMIcPHzZz5swx2bNnN7Nnz87s3cs0GdGPXbp0MYULF3ZcnrxkyRJToEABM2jQoMzevUxz9epVs3v3brN7924jyYwbN87s3r3bnDx50hhjzHvvvWdeeeUVR/vEy5Pfeecd8/vvv5uvvvoq2cuTM/ozhqBiMV988YUpVqyY8fLyMtWqVTNbt2513FerVi3TpUsXx/TGjRtN2bJljd1uN/nz5zevvPKK+fPPP9O0zsdVevejpGRvERERmbRH7pERz8e/e9yDijEZ04f/93//Z8LCwozdbjdlypQxU6ZMyYxdcav07seYmBjTt29fU6xYMePt7W1KlChh3n//fRMbG5tZu5TpNmzYkOz7WGLfdenSxdSqVSvJMhUrVjReXl6mRIkSyb7nZfRnjM2Yx/hr+AAAwCONMSoAAMCyCCoAAMCyCCoAAMCyCCoAAMCyCCoAAMCyCCoAAMCyCCoAAMCyCCoAAMCyCCoAHtqWLVvk4eGhZs2aOc3fuHGjbDabrly5kmSZoKAgjR8/3jFts9kcNx8fHz399NNavnx5kuVu3rypDz74QCEhIbLb7SpQoIBefPFF7du3L0nbmJgYvf/++ypTpoy8vb3l7++v+vXra8mSJeK7LoFHA0EFwEObOnWq3nrrLW3evFlnzpxxeT0RERE6e/asduzYoRo1aqhdu3b69ddfHffHxsaqfv36mjZtmj7++GMdOnRIq1atUlxcnKpXr66tW7c62l65ckX/+Mc/NHPmTA0ePFi7du3S5s2bFR4erkGDBik6Ovqh9hlA5vB0dwEAHm3Xrl3T/PnztWPHDp07d07Tp0/XkCFDXFpXnjx55O/vL39/f3300UeaMGGCNmzYoPLly0uSxo8fry1btmj37t2qUKGCJCkwMFCLFy9W9erV1aNHD/3222+y2WwaMmSITpw4oUOHDikgIMCxjZCQEHXo0EHe3t4Pv/MAMhxHVAA8lAULFqhMmTIqXbq0OnXqpGnTpj30aZW4uDhNnTpV0t2fkk80d+5cNWjQwBFSEmXJkkVvv/229u/fr7179yohIUHz5s1Tx44dnUJKopw5c8rTk7/TgEcBr1QAD2Xq1Knq1KmTJKlx48aKjo7Wpk2bVLt27TSvq0OHDvLw8NDNmzeVkJCgoKAgvfTSS477Dx06pDp16iS7bNmyZR1tAgICdPnyZZUpUybtOwTAUjiiAsBlBw8e1LZt29ShQwdJkqenp8LDwx1HQ9Lqs88+0549e/T9998rNDRU//nPf5QvXz6nNqk5WsNAWeDxwREVAC6bOnWq4uLinE6vGGNkt9v15ZdfysfHR5IUHR2tPHnyOC175coV5c6d22mev7+/SpUqpVKlSikiIkJNmzbV/v375efnJ+nu+JLff/892VoS54eEhMjX11d58uTRgQMH0mtXAbgJR1QAuCQuLk4zZ87U2LFjtWfPHsdt7969CggI0Lfffqvg4GBlyZJFO3fudFr22LFjio6OVkhISIrrr1atmqpUqaIRI0Y45rVv315r167V3r17ndomJCTos88+U2hoqCpUqKAsWbKoffv2mjNnTrJXIV27dk1xcXEP2
QMAMoUBABcsXbrUeHl5mStXriS5b9CgQaZq1arGGGNee+01ExQUZJYvX26OHTtmNm3aZJ555hnzzDPPmISEBMcykszSpUud1rNq1Spjt9vNH3/8YYwx5ubNm6Z69eqmaNGiZsGCBebkyZNm27ZtpnXr1iZHjhxmy5YtjmUvXrxoypQpY4oUKWJmzJhh9u3bZw4dOmSmTp1qSpUqZS5fvpz+nQIg3dmM4WQugLRr0aKFEhIStHLlyiT3bdu2TdWrV9fevXsVEhKi0aNHa/78+Tp58qT8/f3VoEEDjRgxQgUKFHAsY7PZtHTpUrVu3doxzxij0NBQ1alTRxMnTpQk3bhxQyNHjnSsL1euXKpTp46GDRumsLAwpzqio6M1evRoLV68WCdPnlTevHlVvnx59erVS61atZLNZsuYzgGQbggqAADAshijAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALIugAgAALOv/AXmr4ymYI038AAAAAElFTkSuQmCC", - "text/plain": [ - "
" - ] - }, - "metadata": {}, - "output_type": "display_data" - } - ], - "source": [ - "from contextlib import redirect_stdout, redirect_stderr# suppress some stdout for better readability\n", - "import matplotlib.pyplot as plt\n", - "from random import randint\n", - "import os\n", - "\n", - "results = []\n", - "repeats = 50\n", - "\n", - "for _ in range(repeats):\n", - " with open(os.devnull, 'w') as fnull, redirect_stdout(fnull), redirect_stderr(fnull): # suppress output\n", - " val_loader, train_loader = dataloader_factory(randint(0, 9999))\n", - " model, optimiser, lr_scheduler = model_factory()\n", - " for epoch in range(epochs):\n", - " model = train_one_epoch(model, train_loader, val_loader, optimiser, lr_scheduler, loss_fn, epoch, eval=False)\n", - " _, auroc, _ = evaluate(model, val_loader, loss_fn)\n", - " results.append(auroc)\n", - "\n", - "plt.hist(results, bins=18)\n", - "plt.xlabel('AUROC')\n", - "plt.ylabel('Frequency')\n", - "plt.title('Distribution of AUROC results on validation split')\n", - "plt.show()" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "As we can see, the scores can vary quite significantly, ranging from <0.9 to a perfect score.\n", - "\n", - "To make the prediction more robust, we adapt two techniques:\n", - "\n", - "- Ensembling models trained on different folds of train-val data. Since the training is so fast, fitting a few addtional models is not a big deal. The train-val splitting method is provided by TDC.\n", + "def train_one_epoch(predictor, task, optimiser):\n", + " train_loss = 0\n", + " \n", + " for inputs, targets in task.train_loader:\n", + " optimiser.zero_grad()\n", + " logits = predictor(inputs, task_name=task.name).squeeze()\n", + " loss = task.get_loss(logits, targets)\n", + " loss.backward()\n", + " optimiser.step()\n", + " train_loss += loss.item()\n", "\n", - "- Rather than choosing the model at the last epoch, we will use best validation loss to decide which one to choose.\n", + " return predictor, train_loss / len(task.train_loader)\n", "\n", - "We already implemented a `dataloader_factory()` method that creates a new training and validation dataloader for each fold. Now, we will also build a method for ensemble-based evaluation, that uses a list of models to caculate the average logits for the prediction." 
- ] - }, - { - "cell_type": "code", - "execution_count": 32, - "metadata": {}, - "outputs": [], - "source": [ - "import torch\n", - "from sklearn.metrics import roc_auc_score, average_precision_score\n", "\n", - "def evaluate_ensemble(predictors, dataloader, loss_fn):\n", - " total_loss = 0\n", - " all_probs = []\n", - " all_targets = []\n", + "class AdmetDataset(Dataset):\n", + " def __init__(self, samples):\n", + " self.samples = samples['Embedding'].tolist()\n", + " self.targets = [float(target) for target in samples['Y'].tolist()]\n", "\n", - " with torch.no_grad():\n", - " \n", - " for inputs, targets in dataloader:\n", - " model_outputs = [predictor(inputs).squeeze() for predictor in predictors]\n", - " averaged_output = torch.sigmoid(torch.mean(torch.stack(model_outputs), dim=0))\n", + " def __len__(self):\n", + " return len(self.samples)\n", "\n", - " loss = loss_fn(averaged_output, targets)\n", - " total_loss += loss.item()\n", + " def __getitem__(self, idx):\n", + " sample = torch.tensor(self.samples[idx])\n", + " target = torch.tensor(self.targets[idx])\n", + " return sample, target\n", "\n", - " all_probs.extend(averaged_output.tolist())\n", - " all_targets.extend(targets.tolist())\n", "\n", - " loss = total_loss / len(all_probs)\n", - " return loss, roc_auc_score(all_targets, all_probs), average_precision_score(all_targets, all_probs)" - ] - }, - { - "cell_type": "markdown", - "metadata": {}, - "source": [ - "Finally, let's see how much better our model gets!" + "class Task:\n", + " def __init__(self, dataset_name, featuriser):\n", + " benchmark = group.get(dataset_name)\n", + " with open(os.devnull, 'w') as fnull, redirect_stdout(fnull), redirect_stderr(fnull): # suppress output\n", + " mols_test = benchmark['test']\n", + " mols_train, mols_valid = group.get_train_valid_split(benchmark=dataset_name, seed=42)\n", + " mols_test['Embedding'] = featuriser(list(mols_test['Drug']))\n", + " mols_train['Embedding'] = featuriser(list(mols_train['Drug']))\n", + " mols_valid['Embedding'] = featuriser(list(mols_valid['Drug']))\n", + " self.name = dataset_name\n", + " self.test_loader = DataLoader(AdmetDataset(mols_test), batch_size=128, shuffle=False)\n", + " self.val_loader = DataLoader(AdmetDataset(mols_valid), batch_size=128, shuffle=False)\n", + " self.train_loader = DataLoader(AdmetDataset(mols_train), batch_size=32, shuffle=True)\n", + " self.task = 'classification' if len(benchmark['test']['Y'].unique()) == 2 else 'regression'\n", + " self.loss_fn = nn.BCELoss() if self.task == 'classification' else nn.MSELoss() \n", + "\n", + " def get_loss(self, logits, targets):\n", + " if self.task == 'classification':\n", + " return self.loss_fn(torch.sigmoid(logits), targets)\n", + " else:\n", + " return self.loss_fn(logits, targets)" ] }, { "cell_type": "code", - "execution_count": 35, + "execution_count": 2, "metadata": {}, "outputs": [ + { + "name": "stderr", + "output_type": "stream", + "text": [ + "Found local copy...\n" + ] + }, { "name": "stdout", "output_type": "stream", "text": [ - "# Fold 1 / 5\n", - "## Epoch 1\ttrain_loss: 0.0195\tval_loss: 0.0112\tval_auroc: 0.3277\tval_avpr: 0.8480\n", - "## Epoch 2\ttrain_loss: 0.0172\tval_loss: 0.0091\tval_auroc: 0.7815\tval_avpr: 0.9616\n", - "## Epoch 3\ttrain_loss: 0.0117\tval_loss: 0.0059\tval_auroc: 0.9160\tval_avpr: 0.9873\n", - "## Epoch 4\ttrain_loss: 0.0073\tval_loss: 0.0041\tval_auroc: 0.9524\tval_avpr: 0.9931\n", - "## Epoch 5\ttrain_loss: 0.0049\tval_loss: 0.0033\tval_auroc: 0.9636\tval_avpr: 0.9951\n", - "## Epoch 6\ttrain_loss: 
0.0036\tval_loss: 0.0033\tval_auroc: 0.9580\tval_avpr: 0.9943\n", - "## Epoch 7\ttrain_loss: 0.0023\tval_loss: 0.0028\tval_auroc: 0.9720\tval_avpr: 0.9964\n", - "## Epoch 8\ttrain_loss: 0.0021\tval_loss: 0.0026\tval_auroc: 0.9776\tval_avpr: 0.9971\n", - "## Epoch 9\ttrain_loss: 0.0016\tval_loss: 0.0027\tval_auroc: 0.9720\tval_avpr: 0.9963\n", - "## Epoch 10\ttrain_loss: 0.0010\tval_loss: 0.0025\tval_auroc: 0.9748\tval_avpr: 0.9967\n", - "## Epoch 11\ttrain_loss: 0.0008\tval_loss: 0.0022\tval_auroc: 0.9776\tval_avpr: 0.9971\n", - "## Epoch 12\ttrain_loss: 0.0007\tval_loss: 0.0022\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 13\ttrain_loss: 0.0007\tval_loss: 0.0026\tval_auroc: 0.9888\tval_avpr: 0.9986\n", - "## Epoch 14\ttrain_loss: 0.0006\tval_loss: 0.0022\tval_auroc: 0.9776\tval_avpr: 0.9971\n", - "## Epoch 15\ttrain_loss: 0.0006\tval_loss: 0.0022\tval_auroc: 0.9776\tval_avpr: 0.9971\n", - "## Epoch 16\ttrain_loss: 0.0005\tval_loss: 0.0021\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 17\ttrain_loss: 0.0005\tval_loss: 0.0020\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 18\ttrain_loss: 0.0005\tval_loss: 0.0020\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 19\ttrain_loss: 0.0004\tval_loss: 0.0020\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 20\ttrain_loss: 0.0004\tval_loss: 0.0020\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 21\ttrain_loss: 0.0004\tval_loss: 0.0020\tval_auroc: 0.9832\tval_avpr: 0.9978\n", - "## Epoch 22\ttrain_loss: 0.0003\tval_loss: 0.0020\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 23\ttrain_loss: 0.0004\tval_loss: 0.0020\tval_auroc: 0.9832\tval_avpr: 0.9978\n", - "## Epoch 24\ttrain_loss: 0.0006\tval_loss: 0.0020\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "## Epoch 25\ttrain_loss: 0.0004\tval_loss: 0.0020\tval_auroc: 0.9804\tval_avpr: 0.9974\n", - "# Fold 2 / 5\n", - "## Epoch 1\ttrain_loss: 0.0196\tval_loss: 0.0098\tval_auroc: 0.3132\tval_avpr: 0.8523\n", - "## Epoch 2\ttrain_loss: 0.0169\tval_loss: 0.0084\tval_auroc: 0.4151\tval_avpr: 0.9037\n", - "## Epoch 3\ttrain_loss: 0.0117\tval_loss: 0.0059\tval_auroc: 0.8830\tval_avpr: 0.9878\n", - "## Epoch 4\ttrain_loss: 0.0076\tval_loss: 0.0037\tval_auroc: 0.9358\tval_avpr: 0.9937\n", - "## Epoch 5\ttrain_loss: 0.0055\tval_loss: 0.0030\tval_auroc: 0.9509\tval_avpr: 0.9951\n", - "## Epoch 6\ttrain_loss: 0.0038\tval_loss: 0.0026\tval_auroc: 0.9660\tval_avpr: 0.9968\n", - "## Epoch 7\ttrain_loss: 0.0028\tval_loss: 0.0024\tval_auroc: 0.9698\tval_avpr: 0.9972\n", - "## Epoch 8\ttrain_loss: 0.0020\tval_loss: 0.0021\tval_auroc: 0.9811\tval_avpr: 0.9982\n", - "## Epoch 9\ttrain_loss: 0.0013\tval_loss: 0.0021\tval_auroc: 0.9736\tval_avpr: 0.9976\n", - "## Epoch 10\ttrain_loss: 0.0012\tval_loss: 0.0019\tval_auroc: 0.9811\tval_avpr: 0.9983\n", - "## Epoch 11\ttrain_loss: 0.0009\tval_loss: 0.0018\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 12\ttrain_loss: 0.0009\tval_loss: 0.0018\tval_auroc: 0.9811\tval_avpr: 0.9983\n", - "## Epoch 13\ttrain_loss: 0.0008\tval_loss: 0.0017\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 14\ttrain_loss: 0.0006\tval_loss: 0.0017\tval_auroc: 0.9887\tval_avpr: 0.9990\n", - "## Epoch 15\ttrain_loss: 0.0005\tval_loss: 0.0017\tval_auroc: 0.9887\tval_avpr: 0.9990\n", - "## Epoch 16\ttrain_loss: 0.0006\tval_loss: 0.0019\tval_auroc: 0.9887\tval_avpr: 0.9990\n", - "## Epoch 17\ttrain_loss: 0.0005\tval_loss: 0.0017\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 18\ttrain_loss: 0.0005\tval_loss: 0.0018\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - 
"## Epoch 19\ttrain_loss: 0.0004\tval_loss: 0.0018\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 20\ttrain_loss: 0.0004\tval_loss: 0.0017\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 21\ttrain_loss: 0.0004\tval_loss: 0.0018\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 22\ttrain_loss: 0.0004\tval_loss: 0.0018\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 23\ttrain_loss: 0.0005\tval_loss: 0.0017\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 24\ttrain_loss: 0.0004\tval_loss: 0.0017\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "## Epoch 25\ttrain_loss: 0.0004\tval_loss: 0.0016\tval_auroc: 0.9849\tval_avpr: 0.9986\n", - "# Fold 3 / 5\n", - "## Epoch 1\ttrain_loss: 0.0241\tval_loss: 0.0135\tval_auroc: 0.5641\tval_avpr: 0.9059\n", - "## Epoch 2\ttrain_loss: 0.0212\tval_loss: 0.0112\tval_auroc: 0.7853\tval_avpr: 0.9595\n", - "## Epoch 3\ttrain_loss: 0.0142\tval_loss: 0.0072\tval_auroc: 0.9199\tval_avpr: 0.9890\n", - "## Epoch 4\ttrain_loss: 0.0087\tval_loss: 0.0042\tval_auroc: 0.9712\tval_avpr: 0.9966\n", - "## Epoch 5\ttrain_loss: 0.0057\tval_loss: 0.0030\tval_auroc: 0.9840\tval_avpr: 0.9982\n", - "## Epoch 6\ttrain_loss: 0.0040\tval_loss: 0.0028\tval_auroc: 0.9808\tval_avpr: 0.9979\n", - "## Epoch 7\ttrain_loss: 0.0028\tval_loss: 0.0024\tval_auroc: 0.9872\tval_avpr: 0.9986\n", - "## Epoch 8\ttrain_loss: 0.0021\tval_loss: 0.0022\tval_auroc: 0.9808\tval_avpr: 0.9978\n", - "## Epoch 9\ttrain_loss: 0.0016\tval_loss: 0.0021\tval_auroc: 0.9872\tval_avpr: 0.9985\n", - "## Epoch 10\ttrain_loss: 0.0013\tval_loss: 0.0021\tval_auroc: 0.9840\tval_avpr: 0.9982\n", - "## Epoch 11\ttrain_loss: 0.0011\tval_loss: 0.0019\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 12\ttrain_loss: 0.0016\tval_loss: 0.0017\tval_auroc: 0.9808\tval_avpr: 0.9978\n", - "## Epoch 13\ttrain_loss: 0.0007\tval_loss: 0.0016\tval_auroc: 0.9872\tval_avpr: 0.9986\n", - "## Epoch 14\ttrain_loss: 0.0009\tval_loss: 0.0016\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 15\ttrain_loss: 0.0008\tval_loss: 0.0017\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 16\ttrain_loss: 0.0007\tval_loss: 0.0017\tval_auroc: 0.9840\tval_avpr: 0.9982\n", - "## Epoch 17\ttrain_loss: 0.0006\tval_loss: 0.0015\tval_auroc: 0.9872\tval_avpr: 0.9985\n", - "## Epoch 18\ttrain_loss: 0.0005\tval_loss: 0.0015\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 19\ttrain_loss: 0.0007\tval_loss: 0.0014\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 20\ttrain_loss: 0.0005\tval_loss: 0.0015\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 21\ttrain_loss: 0.0006\tval_loss: 0.0014\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 22\ttrain_loss: 0.0005\tval_loss: 0.0014\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 23\ttrain_loss: 0.0009\tval_loss: 0.0014\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 24\ttrain_loss: 0.0004\tval_loss: 0.0015\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "## Epoch 25\ttrain_loss: 0.0004\tval_loss: 0.0014\tval_auroc: 0.9904\tval_avpr: 0.9989\n", - "# Fold 4 / 5\n", - "## Epoch 1\ttrain_loss: 0.0242\tval_loss: 0.0124\tval_auroc: 0.5370\tval_avpr: 0.9192\n", - "## Epoch 2\ttrain_loss: 0.0212\tval_loss: 0.0105\tval_auroc: 0.5880\tval_avpr: 0.9397\n", - "## Epoch 3\ttrain_loss: 0.0140\tval_loss: 0.0069\tval_auroc: 0.8148\tval_avpr: 0.9805\n", - "## Epoch 4\ttrain_loss: 0.0088\tval_loss: 0.0041\tval_auroc: 0.8565\tval_avpr: 0.9864\n", - "## Epoch 5\ttrain_loss: 0.0058\tval_loss: 0.0032\tval_auroc: 0.9213\tval_avpr: 0.9938\n", - "## Epoch 6\ttrain_loss: 0.0040\tval_loss: 
0.0025\tval_auroc: 0.9583\tval_avpr: 0.9969\n", - "## Epoch 7\ttrain_loss: 0.0028\tval_loss: 0.0024\tval_auroc: 0.9722\tval_avpr: 0.9980\n", - "## Epoch 8\ttrain_loss: 0.0021\tval_loss: 0.0024\tval_auroc: 0.9722\tval_avpr: 0.9980\n", - "## Epoch 9\ttrain_loss: 0.0016\tval_loss: 0.0022\tval_auroc: 0.9722\tval_avpr: 0.9980\n", - "## Epoch 10\ttrain_loss: 0.0015\tval_loss: 0.0021\tval_auroc: 0.9815\tval_avpr: 0.9987\n", - "## Epoch 11\ttrain_loss: 0.0011\tval_loss: 0.0022\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 12\ttrain_loss: 0.0010\tval_loss: 0.0023\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 13\ttrain_loss: 0.0007\tval_loss: 0.0023\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 14\ttrain_loss: 0.0007\tval_loss: 0.0022\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 15\ttrain_loss: 0.0006\tval_loss: 0.0021\tval_auroc: 0.9815\tval_avpr: 0.9987\n", - "## Epoch 16\ttrain_loss: 0.0006\tval_loss: 0.0021\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 17\ttrain_loss: 0.0007\tval_loss: 0.0021\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 18\ttrain_loss: 0.0007\tval_loss: 0.0021\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 19\ttrain_loss: 0.0004\tval_loss: 0.0021\tval_auroc: 0.9815\tval_avpr: 0.9987\n", - "## Epoch 20\ttrain_loss: 0.0004\tval_loss: 0.0021\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 21\ttrain_loss: 0.0004\tval_loss: 0.0021\tval_auroc: 0.9815\tval_avpr: 0.9987\n", - "## Epoch 22\ttrain_loss: 0.0004\tval_loss: 0.0021\tval_auroc: 0.9769\tval_avpr: 0.9983\n", - "## Epoch 23\ttrain_loss: 0.0004\tval_loss: 0.0021\tval_auroc: 0.9815\tval_avpr: 0.9987\n", - "## Epoch 24\ttrain_loss: 0.0004\tval_loss: 0.0021\tval_auroc: 0.9815\tval_avpr: 0.9987\n", - "## Epoch 25\ttrain_loss: 0.0005\tval_loss: 0.0021\tval_auroc: 0.9815\tval_avpr: 0.9987\n", - "# Fold 5 / 5\n", - "## Epoch 1\ttrain_loss: 0.0236\tval_loss: 0.0113\tval_auroc: 0.3108\tval_avpr: 0.8229\n", - "## Epoch 2\ttrain_loss: 0.0205\tval_loss: 0.0093\tval_auroc: 0.4486\tval_avpr: 0.8574\n", - "## Epoch 3\ttrain_loss: 0.0136\tval_loss: 0.0060\tval_auroc: 0.6466\tval_avpr: 0.9016\n", - "## Epoch 4\ttrain_loss: 0.0080\tval_loss: 0.0043\tval_auroc: 0.7619\tval_avpr: 0.9576\n", - "## Epoch 5\ttrain_loss: 0.0052\tval_loss: 0.0037\tval_auroc: 0.8396\tval_avpr: 0.9763\n", - "## Epoch 6\ttrain_loss: 0.0038\tval_loss: 0.0033\tval_auroc: 0.9148\tval_avpr: 0.9889\n", - "## Epoch 7\ttrain_loss: 0.0027\tval_loss: 0.0031\tval_auroc: 0.9273\tval_avpr: 0.9904\n", - "## Epoch 8\ttrain_loss: 0.0023\tval_loss: 0.0030\tval_auroc: 0.9223\tval_avpr: 0.9895\n", - "## Epoch 9\ttrain_loss: 0.0017\tval_loss: 0.0029\tval_auroc: 0.9424\tval_avpr: 0.9926\n", - "## Epoch 10\ttrain_loss: 0.0014\tval_loss: 0.0028\tval_auroc: 0.9424\tval_avpr: 0.9925\n", - "## Epoch 11\ttrain_loss: 0.0011\tval_loss: 0.0027\tval_auroc: 0.9424\tval_avpr: 0.9924\n", - "## Epoch 12\ttrain_loss: 0.0008\tval_loss: 0.0027\tval_auroc: 0.9549\tval_avpr: 0.9943\n", - "## Epoch 13\ttrain_loss: 0.0007\tval_loss: 0.0026\tval_auroc: 0.9524\tval_avpr: 0.9938\n", - "## Epoch 14\ttrain_loss: 0.0007\tval_loss: 0.0026\tval_auroc: 0.9398\tval_avpr: 0.9921\n", - "## Epoch 15\ttrain_loss: 0.0007\tval_loss: 0.0028\tval_auroc: 0.9499\tval_avpr: 0.9934\n", - "## Epoch 16\ttrain_loss: 0.0007\tval_loss: 0.0026\tval_auroc: 0.9524\tval_avpr: 0.9938\n", - "## Epoch 17\ttrain_loss: 0.0008\tval_loss: 0.0025\tval_auroc: 0.9699\tval_avpr: 0.9963\n", - "## Epoch 18\ttrain_loss: 0.0008\tval_loss: 0.0024\tval_auroc: 0.9649\tval_avpr: 0.9956\n", - "## Epoch 
- "## Epoch 20\ttrain_loss: 0.0005\tval_loss: 0.0027\tval_auroc: 0.9649\tval_avpr: 0.9956\n",
- "## Epoch 21\ttrain_loss: 0.0006\tval_loss: 0.0029\tval_auroc: 0.9649\tval_avpr: 0.9957\n",
- "## Epoch 22\ttrain_loss: 0.0005\tval_loss: 0.0028\tval_auroc: 0.9624\tval_avpr: 0.9953\n",
- "## Epoch 23\ttrain_loss: 0.0005\tval_loss: 0.0028\tval_auroc: 0.9599\tval_avpr: 0.9950\n",
- "## Epoch 24\ttrain_loss: 0.0005\tval_loss: 0.0029\tval_auroc: 0.9599\tval_avpr: 0.9950\n",
- "## Epoch 25\ttrain_loss: 0.0004\tval_loss: 0.0028\tval_auroc: 0.9574\tval_avpr: 0.9946\n",
- "test_loss: 0.0012\n",
- "test_auroc: 0.9975\n",
- "test_avpr: 0.9993\n"
+ "featurising datasets\n",
+ "dataset=1 / 22\n",
+ "dataset=2 / 22\n",
+ "dataset=3 / 22\n",
+ "dataset=4 / 22\n",
+ "dataset=5 / 22\n"
]
}
],
"source": [
- "from copy import deepcopy\n",
+ "EPOCHS = 25\n",
"\n",
- "seeds = [1, 2, 3, 4, 5]\n",
+ "group = admet_group(path='admet_data/')\n",
+ "featuriser = Minimol()\n",
+ "tasks = {}\n",
"\n",
- "best_models = []\n",
+ "print('featurising datasets')\n",
+ "for dataset_i, dataset_name in enumerate(group.dataset_names):\n",
+ "    print(f'dataset={dataset_i + 1} / {len(group.dataset_names)}')\n",
+ "    tasks[dataset_name] = Task(dataset_name, featuriser)\n",
"\n",
- "for fold_i, seed in enumerate(seeds):\n",
- "    print(f\"# Fold {fold_i +1} / {len(seeds)}\")\n",
- "    with open(os.devnull, 'w') as fnull, redirect_stdout(fnull), redirect_stderr(fnull): # suppress output\n",
- "        val_loader, train_loader = dataloader_factory(seed)\n",
- "        model, optimiser, lr_scheduler = model_factory()\n",
- "\n",
- "        best_epoch = {\"model\": None, \"result\": None}\n",
- "        for epoch in range(epochs):\n",
- "            model = train_one_epoch(model, train_loader, val_loader, optimiser, lr_scheduler, loss_fn, epoch)\n",
- "            val_loss, _, _ = evaluate(model, val_loader, loss_fn)\n",
- "\n",
- "            if best_epoch['model'] is None:\n",
- "                best_epoch['model'] = deepcopy(model)\n",
- "                best_epoch['result'] = deepcopy(val_loss)\n",
- "            else:\n",
- "                best_epoch['model'] = best_epoch['model'] if best_epoch['result'] <= val_loss else deepcopy(model)\n",
- "                best_epoch['result'] = best_epoch['result'] if best_epoch['result'] <= val_loss else deepcopy(val_loss)\n",
- "\n",
- "    best_models.append(deepcopy(best_epoch['model']))\n",
- "\n",
- "test_loss, test_auroc, test_avpr = evaluate_ensemble(best_models, test_loader, loss_fn)\n",
- "print(\n",
- "    f\"test_loss: {test_loss:.4f}\\n\"\n",
- "    f\"test_auroc: {test_auroc:.4f}\\n\"\n",
- "    f\"test_avpr: {test_avpr:.4f}\"\n",
- ")"
+ "del featuriser"
]
},
{
- "cell_type": "markdown",
+ "cell_type": "code",
+ "execution_count": null,
"metadata": {},
+ "outputs": [],
"source": [
- "In about 15s an ensemble was build reaching the performance of 0.9975 in AUROC on the test set. This is slightly better than the performance we achieved with a single model, but more importantly, the ensemble is not senstitive to which part of the data is used for validation, and is less sensitive to the intialisation because we intialise n models getting somewhere close to an average performance.\n",
+ "model, optimiser, lr_scheduler = model_factory()\n",
"\n",
- "This score is better than the SoTA, showcasing how powerful MiniMol is in featurising molecules for downstream biological tasks."
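+ "# The trunk (dense + batch-norm layers) is shared by every task head; with\n",
+ "# unfreeze_trunk() each task's gradients also update that shared representation,\n",
+ "# while freeze_trunk() would restrict training to the per-task heads.\n",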
+ "model.unfreeze_trunk()\n",
+ "for epoch in range(EPOCHS):\n",
+ "    for task_i, (task_name, task) in enumerate(tasks.items()):\n",
+ "        model, train_loss = train_one_epoch(model, task, optimiser)\n",
+ "        val_loss = evaluate(model, task, eval_type='val')\n",
+ "        print(f'epoch={epoch + 1} / {EPOCHS} | {task_name=} | {train_loss=:.4f} | {val_loss=:.4f}')\n",
+ "    lr_scheduler.step()  # one warmup/cosine step per epoch, not per task"
]
}
],
diff --git a/shared_downstream_adaptation.py b/shared_downstream_adaptation.py
new file mode 100644
index 0000000..a93f56a
--- /dev/null
+++ b/shared_downstream_adaptation.py
@@ -0,0 +1,189 @@
+from minimol import Minimol
+
+import os
+import math
+from copy import deepcopy
+import pickle
+
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import torch.nn.functional as F
+from torch.optim.lr_scheduler import LambdaLR
+from torch.utils.data import DataLoader, Dataset
+
+from tdc.benchmark_group import admet_group
+
+from contextlib import redirect_stdout, redirect_stderr
+
+
+class MultiTaskModel(nn.Module):
+    def __init__(self, hidden_dim=512, input_dim=512, head_hidden_dim=256, dropout=0.1, task_names=None):
+        super(MultiTaskModel, self).__init__()
+
+        # Shared trunk: two dense blocks over the 512-d MiniMol fingerprint.
+        self.dense1 = nn.Linear(input_dim, hidden_dim)
+        self.dense2 = nn.Linear(hidden_dim, hidden_dim)
+        self.bn1 = nn.BatchNorm1d(hidden_dim)
+        self.bn2 = nn.BatchNorm1d(hidden_dim)
+        self.dropout = nn.Dropout(dropout)
+
+        # One small MLP head per ADMET task, indexed by task name.
+        self.heads = nn.ModuleDict({
+            task_name: nn.Sequential(
+                nn.Linear(hidden_dim, head_hidden_dim),
+                nn.ReLU(),
+                nn.Dropout(dropout),
+                nn.Linear(head_hidden_dim, 1)
+            ) for task_name in task_names
+        })
+
+        self.trunk_frozen = False
+
+    def forward(self, x, task_name):
+        x = self.dense1(x)
+        x = self.bn1(x)
+        x = F.relu(x)
+        x = self.dropout(x)
+
+        x = self.dense2(x)
+        x = self.bn2(x)
+        x = F.relu(x)
+        x = self.dropout(x)
+
+        x = self.heads[task_name](x)
+        return x
+
+    def freeze_trunk(self):
+        self.trunk_frozen = True
+        for param in self.dense1.parameters():
+            param.requires_grad = False
+        for param in self.dense2.parameters():
+            param.requires_grad = False
+        for param in self.bn1.parameters():
+            param.requires_grad = False
+        for param in self.bn2.parameters():
+            param.requires_grad = False
+
+    def unfreeze_trunk(self):
+        self.trunk_frozen = False
+        for param in self.dense1.parameters():
+            param.requires_grad = True
+        for param in self.dense2.parameters():
+            param.requires_grad = True
+        for param in self.bn1.parameters():
+            param.requires_grad = True
+        for param in self.bn2.parameters():
+            param.requires_grad = True
+
+
+def model_factory(task_names=None, lr=3e-3, epochs=25, warmup=5, weight_decay=1e-4):
+    # Default to one head per ADMET task; this reads the module-level `tasks`
+    # dict, mirroring how Task reads the module-level `group`.
+    task_names = list(tasks.keys()) if task_names is None else task_names
+    model = MultiTaskModel(task_names=task_names)
+    optimiser = optim.Adam(model.parameters(), lr=lr, weight_decay=weight_decay)
+
+    # Linear warmup for `warmup` epochs, then cosine decay; stepped once per epoch.
+    def lr_fn(epoch):
+        if epoch < warmup: return (epoch + 1) / warmup
+        else: return (1 + math.cos(math.pi * (epoch - warmup) / (epochs - warmup))) / 2
+
+    lr_scheduler = LambdaLR(optimiser, lr_lambda=lr_fn)
+    return model, optimiser, lr_scheduler
+
+
+def evaluate(predictor, task, eval_type='val'):
+    predictor.eval()
+    total_loss = 0
+
+    # Task exposes val_loader / test_loader (see the Task class below).
+    dataloader = task.val_loader if eval_type == 'val' else task.test_loader
+
+    with torch.no_grad():
+        for inputs, targets in dataloader:
+            logits = predictor(inputs, task_name=task.name).squeeze()
+            loss = task.get_loss(logits, targets)
+            total_loss += loss.item()
+
+    loss = total_loss / len(dataloader)
+
+    return loss
+
+
+def evaluate_ensemble(predictors, dataloader, task):
+    # `task` is a Task instance: its name routes inputs to the right head and
+    # its type decides whether a sigmoid is applied to the averaged logits.
+    predictions = []
+    with torch.no_grad():
+        for inputs, _ in dataloader:
+            ensemble_logits = [predictor(inputs, task_name=task.name).squeeze() for predictor in predictors]
+            averaged_logits = torch.mean(torch.stack(ensemble_logits), dim=0)
+            if task.task == 'classification':
+                predictions += torch.sigmoid(averaged_logits)
+            else:
+                predictions += averaged_logits
+
+    return predictions
+
+
+def train_one_epoch(predictor, task, optimiser):
+    predictor.train()  # evaluate() leaves the model in eval mode
+    train_loss = 0
+
+    for inputs, targets in task.train_loader:
+        optimiser.zero_grad()
+        logits = predictor(inputs, task_name=task.name).squeeze()
+        loss = task.get_loss(logits, targets)
+        loss.backward()
+        optimiser.step()
+        train_loss += loss.item()
+
+    return predictor, train_loss / len(task.train_loader)
+
+
+class AdmetDataset(Dataset):
+    def __init__(self, samples):
+        self.samples = samples['Embedding'].tolist()
+        self.targets = [float(target) for target in samples['Y'].tolist()]
+
+    def __len__(self):
+        return len(self.samples)
+
+    def __getitem__(self, idx):
+        sample = torch.tensor(self.samples[idx])
+        target = torch.tensor(self.targets[idx])
+        return sample, target
+
+
+class Task:
+    def __init__(self, dataset_name, featuriser):
+        benchmark = group.get(dataset_name)
+        with open(os.devnull, 'w') as fnull, redirect_stdout(fnull), redirect_stderr(fnull):  # suppress output
+            mols_test = benchmark['test']
+            mols_train, mols_valid = group.get_train_valid_split(benchmark=dataset_name, seed=42)
+            mols_test['Embedding'] = featuriser(list(mols_test['Drug']))
+            mols_train['Embedding'] = featuriser(list(mols_train['Drug']))
+            mols_valid['Embedding'] = featuriser(list(mols_valid['Drug']))
+        self.name = dataset_name
+        self.test_loader = DataLoader(AdmetDataset(mols_test), batch_size=128, shuffle=False)
+        self.val_loader = DataLoader(AdmetDataset(mols_valid), batch_size=128, shuffle=False)
+        self.train_loader = DataLoader(AdmetDataset(mols_train), batch_size=32, shuffle=True)
+        # A benchmark with a binary target column is treated as classification.
+        self.task = 'classification' if len(benchmark['test']['Y'].unique()) == 2 else 'regression'
+        self.loss_fn = nn.BCELoss() if self.task == 'classification' else nn.MSELoss()
+
+    def get_loss(self, logits, targets):
+        if self.task == 'classification':
+            return self.loss_fn(torch.sigmoid(logits), targets)
+        else:
+            return self.loss_fn(logits, targets)
+
+
+EPOCHS = 25
+
+group = admet_group(path='admet_data/')
+featuriser = Minimol()
+tasks = {dataset_name: Task(dataset_name, featuriser) for dataset_name in group.dataset_names}
+del featuriser  # embeddings are cached in each Task, so the featuriser can be freed
+model, optimiser, lr_scheduler = model_factory()
+
+model.unfreeze_trunk()
+for epoch in range(EPOCHS):
+    for task_i, (task_name, task) in enumerate(tasks.items()):
+        model, train_loss = train_one_epoch(model, task, optimiser)
+        val_loss = evaluate(model, task, eval_type='val')
+        print(f'epoch={epoch + 1} / {EPOCHS} | {task_name=} | {train_loss=:.4f} | {val_loss=:.4f}')
+    lr_scheduler.step()  # one warmup/cosine step per epoch, not per task
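+
+# Optional sketch, not part of the original script: score each head on its
+# held-out TDC test split by reusing evaluate() above with eval_type='test'.
+for task_name, task in tasks.items():
+    test_loss = evaluate(model, task, eval_type='test')
+    print(f'{task_name=} | {test_loss=:.4f}')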