From b0909305f99a47fe29e2ee8d7c82f6cb285b63f3 Mon Sep 17 00:00:00 2001 From: gwaygenomics Date: Fri, 7 Jun 2019 13:44:57 -0400 Subject: [PATCH 1/8] update main to use os.path.join --- deepprofiler/__main__.py | 60 +++++++++++++++++++++------------------- 1 file changed, 32 insertions(+), 28 deletions(-) diff --git a/deepprofiler/__main__.py b/deepprofiler/__main__.py index 6a8110e..552f743 100644 --- a/deepprofiler/__main__.py +++ b/deepprofiler/__main__.py @@ -4,7 +4,6 @@ import click import deepprofiler.dataset.compression -import deepprofiler.dataset.image_dataset import deepprofiler.dataset.indexing import deepprofiler.dataset.illumination_statistics import deepprofiler.dataset.metadata @@ -31,41 +30,46 @@ def cli(context, root, config, cores): dirs = { "root": root, - "locations": root+"/inputs/locations/", # TODO: use os.path.join() - "config": root+"/inputs/config/", - "images": root+"/inputs/images/", - "metadata": root+"/inputs/metadata/", - "preprocessed": root+"/inputs/preprocessed/", - "pretrained": root+"/inputs/pretrained/", - "intensities": root+"/outputs/intensities/", - "compressed_images": root+"/outputs/compressed/images/", - "compressed_metadata": root+"/outputs/compressed/metadata/", - "training": root+"/outputs/training/", - "checkpoints": root+"/outputs/training/checkpoint/", - "logs": root+"/outputs/training/logs/", - "summaries": root+"/outputs/training/summaries/", - "features": root+"/outputs/features/" + "locations": os.path.join(root, "inputs", "locations"), + "config": os.path.join(root, "inputs", "config"), + "images": os.path.join(root, "inputs", "images"), + "metadata": os.path.join(root, "inputs", "metadata"), + "preprocessed": os.path.join(root, "inputs", "preprocessed"), + "pretrained": os.path.join(root, "inputs", "pretrained"), + "intensities": os.path.join(root, "outputs", "intensities"), + "compressed_images": os.path.join(root, "outputs", "compressed", "images"), + "compressed_metadata": os.path.join(root, "outputs", "compressed", "metadata"), + "training": os.path.join(root, "outputs", "training"), + "checkpoints": os.path.join(root, "outputs", "training", "checkpoint"), + "logs": os.path.join(root, "outputs", "training", "logs"), + "summaries": os.path.join(root, "outputs", "training", "summaries"), + "features": os.path.join(root, "outputs", "features") } if config is not None: + context.obj["config"] = {} context.obj["config"]["paths"] = {} context.obj["config"]["paths"]["config"] = config dirs["config"] = os.path.dirname(os.path.abspath(config)) else: - config = dirs["config"] + "/config.json" + config = os.path.join(dirs["config"], "config.json") + context.obj["cores"] = cores + if os.path.isfile(config): with open(config, "r") as f: params = json.load(f) if "paths" in params.keys(): for key, value in dirs.items(): if key not in params["paths"].keys(): - params["paths"][key] = dirs[key] + params["paths"][key] = os.path.join(root, dirs[key]) else: - dirs[key] = params["paths"][key] + dirs[key] = os.path.join(root, params["paths"][key]) + else: params["paths"] = dirs - params["paths"]["index"] = params["paths"]["metadata"] + "/index.csv" + + params["paths"]["index"] = os.path.join(root, params["paths"]["metadata"], "index.csv") context.obj["config"] = params process = deepprofiler.dataset.utils.Parallel(context.obj["config"], numProcs=context.obj["cores"]) context.obj["process"] = process @@ -106,7 +110,7 @@ def prepare(context): metadata = deepprofiler.dataset.metadata.read_plates(context.obj["config"]["paths"]["index"]) # reinitialize 
generator process.compute(deepprofiler.dataset.compression.compress_plate, metadata) deepprofiler.dataset.indexing.write_compression_index(context.obj["config"]) - context.parent.obj["config"]["paths"]["index"] = context.obj["config"]["paths"]["compressed_metadata"]+"/compressed.csv" + context.parent.obj["config"]["paths"]["index"] = os.path.join(context.obj["config"]["paths"]["compressed_metadata"], "compressed.csv") print("Compression complete!") @@ -117,7 +121,7 @@ def prepare(context): @click.pass_context def optimize(context, epoch, seed): if context.parent.obj["config"]["prepare"]["compression"]["implement"]: - context.parent.obj["config"]["paths"]["index"] = context.obj["config"]["paths"]["compressed_metadata"]+"/compressed.csv" + context.parent.obj["config"]["paths"]["index"] = os.path.join(context.obj["config"]["paths"]["compressed_metadata"], "compressed.csv") context.parent.obj["config"]["paths"]["images"] = context.obj["config"]["paths"]["compressed_images"] metadata = deepprofiler.dataset.image_dataset.read_dataset(context.obj["config"]) optim = deepprofiler.learning.optimization.Optimize(context.obj["config"], metadata, epoch, seed) @@ -131,7 +135,7 @@ def optimize(context, epoch, seed): @click.pass_context def train(context, epoch, seed): if context.parent.obj["config"]["prepare"]["compression"]["implement"]: - context.parent.obj["config"]["paths"]["index"] = context.obj["config"]["paths"]["compressed_metadata"]+"/compressed.csv" + context.parent.obj["config"]["paths"]["index"] = os.path.join(context.obj["config"]["paths"]["compressed_metadata"], "compressed.csv") context.parent.obj["config"]["paths"]["images"] = context.obj["config"]["paths"]["compressed_images"] metadata = deepprofiler.dataset.image_dataset.read_dataset(context.obj["config"]) deepprofiler.learning.training.learn_model(context.obj["config"], metadata, epoch, seed) @@ -141,12 +145,12 @@ def train(context, epoch, seed): @cli.command() @click.pass_context @click.option("--part", - help="Part of index to process", - default=-1, + help="Part of index to process", + default=-1, type=click.INT) def profile(context, part): if context.parent.obj["config"]["prepare"]["compression"]["implement"]: - context.parent.obj["config"]["paths"]["index"] = context.obj["config"]["paths"]["compressed_metadata"]+"/compressed.csv" + context.parent.obj["config"]["paths"]["index"] = os.path.join(context.obj["config"]["paths"]["compressed_metadata"], "compressed.csv") context.parent.obj["config"]["paths"]["images"] = context.obj["config"]["paths"]["compressed_images"] config = context.obj["config"] if part >= 0: @@ -154,17 +158,17 @@ def profile(context, part): config["paths"]["index"] = context.obj["config"]["paths"]["index"].replace("index.csv", partfile) metadata = deepprofiler.dataset.image_dataset.read_dataset(context.obj["config"]) deepprofiler.learning.profiling.profile(context.obj["config"], metadata) - + # Auxiliary tool: Split index in multiple parts @cli.command() @click.pass_context -@click.option("--parts", +@click.option("--parts", help="Number of parts to split the index", type=click.INT) def split(context, parts): if context.parent.obj["config"]["prepare"]["compression"]["implement"]: - context.parent.obj["config"]["paths"]["index"] = context.obj["config"]["paths"]["compressed_metadata"]+"/compressed.csv" + context.parent.obj["config"]["paths"]["index"] = os.path.join(context.obj["config"]["paths"]["compressed_metadata"], "compressed.csv") context.parent.obj["config"]["paths"]["images"] = 
context.obj["config"]["paths"]["compressed_images"] deepprofiler.dataset.indexing.split_index(context.obj["config"], parts) From e6ba3180d582d77d18a1918cbeeed26b0d04319a Mon Sep 17 00:00:00 2001 From: gwaygenomics Date: Fri, 7 Jun 2019 13:49:21 -0400 Subject: [PATCH 2/8] use os.path.join in image_datasets.py --- deepprofiler/dataset/image_dataset.py | 22 +++++++++++----------- 1 file changed, 11 insertions(+), 11 deletions(-) diff --git a/deepprofiler/dataset/image_dataset.py b/deepprofiler/dataset/image_dataset.py index 7182d5a..472ffb5 100644 --- a/deepprofiler/dataset/image_dataset.py +++ b/deepprofiler/dataset/image_dataset.py @@ -1,3 +1,4 @@ +import os import numpy as np import pandas as pd @@ -21,7 +22,7 @@ def __init__(self, metadata, sampling_field, channels, dataRoot, keyGen): def getImagePaths(self, r): key = self.keyGen(r) - image = [self.root + "/" + r[ch] for ch in self.channels] + image = [os.path.join(self.root, r[ch]) for ch in self.channels] outlines = self.outlines if outlines is not None: outlines = self.outlines + r["Outlines"] @@ -115,9 +116,9 @@ def read_dataset(config): # Add outlines if specified outlines = None if "outlines" in config["prepare"].keys() and config["prepare"]["outlines"] != "": - df = pd.read_csv(config["paths"]["metadata"] + "/outlines.csv") + df = pd.read_csv(os.path.join(config["paths"]["metadata"], "outlines.csv")) metadata.mergeOutlines(df) - outlines = config["paths"]["root"] + "inputs/outlines/" + outlines = os.path.join(config["paths"]["root"], "inputs", "outlines") print(metadata.data.info()) @@ -128,13 +129,14 @@ def read_dataset(config): metadata.splitMetadata(trainingFilter, validationFilter) # Create a dataset - keyGen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) + keyGen = lambda r: os.path.join(r["Metadata_Plate"], "{}-{}".format(r["Metadata_Well"], r["Metadata_Site"])) + dset = ImageDataset( - metadata, - config["train"]["sampling"]["field"], - config["prepare"]["images"]["channels"], - config["paths"]["images"], - keyGen + metadata=metadata, + sampling_field=config["train"]["sampling"]["field"], + channels=config["prepare"]["images"]["channels"], + dataRoot=os.path.join(config["paths"]["root"], config["paths"]["images"]), + keyGen=keyGen ) # Add training targets @@ -147,5 +149,3 @@ def read_dataset(config): dset.outlines = outlines return dset - - From 7c13b0a5602c68b8284014f9786a317e02e7eac9 Mon Sep 17 00:00:00 2001 From: gwaygenomics Date: Fri, 7 Jun 2019 13:51:44 -0400 Subject: [PATCH 3/8] use os.path.join in boxes.py --- deepprofiler/imaging/boxes.py | 14 ++++++++------ 1 file changed, 8 insertions(+), 6 deletions(-) diff --git a/deepprofiler/imaging/boxes.py b/deepprofiler/imaging/boxes.py index dbbb50e..9dc6899 100644 --- a/deepprofiler/imaging/boxes.py +++ b/deepprofiler/imaging/boxes.py @@ -10,12 +10,13 @@ def get_locations(image_key, config, randomize=True, seed=None): keys = image_key.split("/") - locations_file = "{}/{}-{}.csv".format( - keys[0], + locations_file = os.path.join(keys[0], "{}-{}.csv".format( keys[1], config["train"]["sampling"]["locations_field"] - ) - locations_path = os.path.join(config["paths"]["locations"], locations_file) + )) + locations_path = os.path.join(config["paths"]["root"], + config["paths"]["locations"], + locations_file) if os.path.exists(locations_path): locations = pd.read_csv(locations_path) random_sample = config["train"]["sampling"]["locations"] @@ -28,11 +29,13 @@ def get_locations(image_key, config, randomize=True, seed=None): x_key = 
config["train"]["sampling"]["locations_field"] + "_Location_Center_X" return pd.DataFrame(columns=[x_key, y_key]) + def load_batch(dataset, config): batch = dataset.getTrainBatch(config["train"]["sampling"]["images"]) batch["locations"] = [ get_locations(x, config) for x in batch["keys"] ] return batch + def prepare_boxes(batch, config): locationsBatch = batch["locations"] image_targets = batch["targets"] @@ -78,6 +81,5 @@ def prepare_boxes(batch, config): result = (np.concatenate(all_boxes), np.concatenate(all_indices), [np.concatenate(t) for t in all_targets], - np.concatenate(all_masks) - ) + np.concatenate(all_masks)) return result From 85025fb44d6b05acff88d03191ee4a2b2176bf19 Mon Sep 17 00:00:00 2001 From: gwaygenomics Date: Fri, 7 Jun 2019 13:56:18 -0400 Subject: [PATCH 4/8] use os.path.join in model.py --- deepprofiler/learning/model.py | 27 ++++++++++++++++++--------- 1 file changed, 18 insertions(+), 9 deletions(-) diff --git a/deepprofiler/learning/model.py b/deepprofiler/learning/model.py index e692723..e410f49 100644 --- a/deepprofiler/learning/model.py +++ b/deepprofiler/learning/model.py @@ -76,8 +76,8 @@ def train(self, epoch=1, metrics=["accuracy"], verbose=1): verbose=verbose, initial_epoch=epoch - 1, validation_data=(x_validation, y_validation) - ) - + ) + # Stop threads and close sessions close(self, crop_session) # Return the feature model and validation data @@ -127,10 +127,11 @@ def start_val_session(dpmodel, configuration): keras.backend.set_session(val_session) dpmodel.val_crop_generator.start(val_session) x_validation, y_validation = deepprofiler.learning.validation.validate( - dpmodel.config, - dpmodel.dset, - dpmodel.val_crop_generator, - val_session) + config=dpmodel.config, + dset=dpmodel.dset, + crop_generator=dpmodel.val_crop_generator, + session=val_session + ) gc.collect() return val_session, x_validation, y_validation @@ -142,7 +143,9 @@ def start_main_session(configuration): def load_weights(dpmodel, epoch): - output_file = dpmodel.config["paths"]["checkpoints"] + "/checkpoint_{epoch:04d}.hdf5" + output_file = os.path.join(dpmodel.config["paths"]["root"], + dpmodel.config["paths"]["checkpoints"], + "checkpoint_{epoch:04d}.hdf5") previous_model = output_file.format(epoch=epoch - 1) if epoch >= 1 and os.path.isfile(previous_model): dpmodel.feature_model.load_weights(previous_model) @@ -153,13 +156,19 @@ def load_weights(dpmodel, epoch): def setup_callbacks(dpmodel): - output_file = dpmodel.config["paths"]["checkpoints"] + "/checkpoint_{epoch:04d}.hdf5" + output_file = os.path.join(dpmodel.config["paths"]["root"], + dpmodel.config["paths"]["checkpoints"], + "checkpoint_{epoch:04d}.hdf5") + callback_model_checkpoint = keras.callbacks.ModelCheckpoint( filepath=output_file, save_weights_only=True, save_best_only=False ) - csv_output = dpmodel.config["paths"]["logs"] + "/log.csv" + + csv_output = os.path.join(dpmodel.config["paths"]["root"], + dpmodel.config["paths"]["logs"], + "log.csv") callback_csv = keras.callbacks.CSVLogger(filename=csv_output) callbacks = [callback_model_checkpoint, callback_csv] return callbacks From cd6c0ff77c20b3821cccb5ce98dd06528a948409 Mon Sep 17 00:00:00 2001 From: gwaygenomics Date: Tue, 30 Jul 2019 10:25:22 -0400 Subject: [PATCH 5/8] no need to double up on data root since root now inits in __main__ --- deepprofiler/dataset/image_dataset.py | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/deepprofiler/dataset/image_dataset.py b/deepprofiler/dataset/image_dataset.py index 55c2520..a3da902 100644 --- 
a/deepprofiler/dataset/image_dataset.py
+++ b/deepprofiler/dataset/image_dataset.py
@@ -135,7 +135,7 @@ def read_dataset(config):
         metadata=metadata,
         sampling_field=config["train"]["sampling"]["field"],
         channels=config["dataset"]["images"]["channels"],
-        dataRoot=os.path.join(config["paths"]["root"], config["paths"]["images"]),
+        dataRoot=config["paths"]["images"],
         keyGen=keyGen
     )

From 0cfcc98eb53d7f8bd5128a1e3177442c9fee47eb Mon Sep 17 00:00:00 2001
From: gwaygenomics
Date: Tue, 30 Jul 2019 10:26:40 -0400
Subject: [PATCH 6/8] correct config paths root key

also, no need to append a trailing slash
---
 tests/deepprofiler/dataset/test_image_dataset.py | 8 ++++----
 1 file changed, 4 insertions(+), 4 deletions(-)

diff --git a/tests/deepprofiler/dataset/test_image_dataset.py b/tests/deepprofiler/dataset/test_image_dataset.py
index 5ce92f2..3c035c2 100644
--- a/tests/deepprofiler/dataset/test_image_dataset.py
+++ b/tests/deepprofiler/dataset/test_image_dataset.py
@@ -25,14 +25,14 @@ def config(out_dir):
         config = json.load(f)
     for path in config["paths"]:
         config["paths"][path] = out_dir + config["paths"].get(path)
-    config["paths"]["root_dir"] = out_dir
+    config["paths"]["root"] = out_dir
     return config

 @pytest.fixture(scope="function")
 def make_struct(config):
     for key, path in config["paths"].items():
-        if key not in ["index", "config_file", "root_dir"]:
-            os.makedirs(path+"/")
+        if key not in ["index", "config_file", "root"]:
+            os.makedirs(path)
     return


@@ -61,7 +61,7 @@ def metadata(out_dir, make_struct, config):
 @pytest.fixture(scope="function")
 def dataset(metadata, config, make_struct):
     keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"])
-    return deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen)
+    return deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root"], keygen)


 def test_init(metadata, out_dir, dataset, config, make_struct):

From 94db30adb957d5463de55b5dea5fa52371a7a917 Mon Sep 17 00:00:00 2001
From: gwaygenomics
Date: Tue, 30 Jul 2019 17:43:18 -0400
Subject: [PATCH 7/8] add conftest for common fixtures
---
 tests/conftest.py | 245 ++++++++++++++++++++++++++++++++++++++++++++++
 1 file changed, 245 insertions(+)
 create mode 100644 tests/conftest.py

diff --git a/tests/conftest.py b/tests/conftest.py
new file mode 100644
index 0000000..0dd5416
--- /dev/null
+++ b/tests/conftest.py
@@ -0,0 +1,245 @@
+import os
+import json
+import random
+import pytest
+import importlib
+import numpy as np
+import pandas as pd
+import skimage.io
+import tensorflow as tf
+
+import deepprofiler.dataset.image_dataset
+import deepprofiler.dataset.metadata
+import deepprofiler.imaging.boxes
+import deepprofiler.imaging.cropping
+import deepprofiler.learning.profiling
+import deepprofiler.learning.validation
+import deepprofiler.dataset.target
+
+# Common functions and fixtures
+def __rand_array():
+    return np.array(random.sample(range(100), 12))
+
+
+@pytest.fixture(scope="function")
+def out_dir(tmpdir):
+    return os.path.abspath(tmpdir.mkdir("test"))
+
+
+@pytest.fixture(scope="function")
+def config(out_dir):
+    with open(os.path.join("tests", "files", "config", "test.json"), "r") as f:
+        config = json.load(f)
+    for path in config["paths"]:
+        config["paths"][path] = out_dir + config["paths"].get(path)
+    config["paths"]["root"] = out_dir
+    return config
+
+
+@pytest.fixture(scope="function")
+def make_struct(config):
+    for key, path in config["paths"].items():
+        if key not in ["index", "config_file", "root"]:
+            os.makedirs(path)
+    return
+
+
+# test
specific fixtures +@pytest.fixture(scope="function") +def imaging_metadata(out_dir, make_struct, config): + filename = os.path.join(config["paths"]["metadata"], "index.csv") + df = pd.DataFrame({ + "Metadata_Plate": __rand_array(), + "Metadata_Well": __rand_array(), + "Metadata_Site": __rand_array(), + "R": [str(x) + ".png" for x in __rand_array()], + "G": [str(x) + ".png" for x in __rand_array()], + "B": [str(x) + ".png" for x in __rand_array()], + "Class": ["0", "1", "2", "3", "0", "1", "2", "3", "0", "1", "2", "3"], + "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], + "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] + }, dtype=int) + df.to_csv(filename, index=False) + meta = deepprofiler.dataset.metadata.Metadata(filename) + train_rule = lambda data: data["Split"].astype(int) == 0 + val_rule = lambda data: data["Split"].astype(int) == 1 + meta.splitMetadata(train_rule, val_rule) + return meta + + +@pytest.fixture(scope="function") +def imaging_dataset(imaging_metadata, config, make_struct): + keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) + return deepprofiler.dataset.image_dataset.ImageDataset(imaging_metadata, "Sampling", ["R", "G", "B"], config["paths"]["root"], keygen) + + +@pytest.fixture(scope="function") +def boxes_dataset(imaging_metadata, config, make_struct): + keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) + return deepprofiler.dataset.image_dataset.ImageDataset(imaging_metadata, "Sampling", ["R", "G", "B"], config["paths"]["root"], keygen) + + +@pytest.fixture(scope="function") +def loadbatch(boxes_dataset, imaging_metadata, out_dir, config, make_struct): + images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) + for i in range(0, 36, 3): + skimage.io.imsave(os.path.join(out_dir, boxes_dataset.meta.data["R"][i // 3]), images[:, :, i]) + skimage.io.imsave(os.path.join(out_dir, boxes_dataset.meta.data["G"][i // 3]), images[:, :, i + 1]) + skimage.io.imsave(os.path.join(out_dir, boxes_dataset.meta.data["B"][i // 3]), images[:, :, i + 2]) + result = deepprofiler.imaging.boxes.load_batch(boxes_dataset, config) + return result + + +@pytest.fixture(scope="function") +def crop_metadata(config, make_struct): + filename = os.path.join(config["paths"]["metadata"], "index.csv") + df = pd.DataFrame({ + "Metadata_Plate": __rand_array(), + "Metadata_Well": __rand_array(), + "Metadata_Site": __rand_array(), + "R": [str(x) + ".png" for x in __rand_array()], + "G": [str(x) + ".png" for x in __rand_array()], + "B": [str(x) + ".png" for x in __rand_array()], + "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], + "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], + "Target": [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] + }, dtype=int) + df.to_csv(filename, index=False) + meta = deepprofiler.dataset.metadata.Metadata(filename) + train_rule = lambda data: data["Split"].astype(int) == 0 + val_rule = lambda data: data["Split"].astype(int) == 1 + meta.splitMetadata(train_rule, val_rule) + return meta + + +@pytest.fixture(scope="function") +def crop_dataset(crop_metadata, config, make_struct): + keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) + dset = deepprofiler.dataset.image_dataset.ImageDataset(crop_metadata, "Sampling", ["R", "G", "B"], config["paths"]["root"], keygen) + target = deepprofiler.dataset.target.MetadataColumnTarget("Target", crop_metadata.data["Target"].unique()) + dset.add_target(target) + return dset + + 
+@pytest.fixture(scope="function") +def crop_generator(config, crop_dataset): + return deepprofiler.imaging.cropping.CropGenerator(config, crop_dataset) + + +@pytest.fixture(scope="function") +def single_image_crop_generator(config, crop_dataset): + return deepprofiler.imaging.cropping.SingleImageCropGenerator(config, crop_dataset) + + +@pytest.fixture(scope="function") +def prepared_crop_generator(crop_generator, out_dir): + images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) + for i in range(0, 36, 3): + skimage.io.imsave(os.path.join(out_dir, crop_generator.dset.meta.data["R"][i // 3]), images[:, :, i]) + skimage.io.imsave(os.path.join(out_dir, crop_generator.dset.meta.data["G"][i // 3]), images[:, :, i + 1]) + skimage.io.imsave(os.path.join(out_dir, crop_generator.dset.meta.data["B"][i // 3]), images[:, :, i + 2]) + crop_generator.build_input_graph() + crop_generator.build_augmentation_graph() + return crop_generator + + +@pytest.fixture(scope="function") +def model_dataset(imaging_metadata, out_dir, config, make_struct): + keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) + dset = deepprofiler.dataset.image_dataset.ImageDataset(imaging_metadata, "Sampling", ["R", "G", "B"], config["paths"]["root"], keygen) + target = deepprofiler.dataset.target.MetadataColumnTarget("Class", imaging_metadata.data["Class"].unique()) + dset.add_target(target) + return dset + + +@pytest.fixture(scope="function") +def model_data(imaging_metadata, out_dir, config, make_struct): + images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) + for i in range(0, 36, 3): + skimage.io.imsave(os.path.join(config["paths"]["root"], imaging_metadata.data["R"][i // 3]), images[:, :, i]) + skimage.io.imsave(os.path.join(config["paths"]["root"], imaging_metadata.data["G"][i // 3]), images[:, :, i + 1]) + skimage.io.imsave(os.path.join(config["paths"]["root"], imaging_metadata.data["B"][i // 3]), images[:, :, i + 2]) + + +@pytest.fixture(scope="function") +def model(config, model_dataset, crop_generator_plugin, val_crop_generator_plugin): + def create(): + module = importlib.import_module("plugins.models.{}".format(config["train"]["model"]["name"])) + importlib.invalidate_caches() + dpmodel = module.ModelClass(config, model_dataset, crop_generator_plugin, val_crop_generator_plugin) + return dpmodel + return create + + +@pytest.fixture(scope="function") +def val_crop_generator(config): + module = importlib.import_module("plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) + importlib.invalidate_caches() + generator = module.SingleImageGeneratorClass + return generator + + +@pytest.fixture(scope="function") +def locations(out_dir, imaging_metadata, config, make_struct): + for i in range(len(imaging_metadata.data.index)): + meta = imaging_metadata.data.iloc[i] + path = os.path.abspath(os.path.join(config["paths"]["locations"], meta["Metadata_Plate"])) + os.makedirs(path, exist_ok=True) + path = os.path.abspath(os.path.join(path, "{}-{}-{}.csv".format(meta["Metadata_Well"], + meta["Metadata_Site"], + config["train"]["sampling"]["locations_field"]))) + locs = pd.DataFrame({ + "R_Location_Center_X": np.random.randint(0, 128, (config["train"]["sampling"]["locations"])), + "R_Location_Center_Y": np.random.randint(0, 128, (config["train"]["sampling"]["locations"])) + }) + locs.to_csv(path, index=False) + + +@pytest.fixture(scope="function") +def checkpoint(config, crop_dataset): + crop_generator = importlib.import_module( + 
"plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) \ + .GeneratorClass + profile_crop_generator = importlib.import_module( + "plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) \ + .SingleImageGeneratorClass + dpmodel = importlib.import_module("plugins.models.{}".format(config["train"]["model"]["name"])) \ + .ModelClass(config, crop_dataset, crop_generator, profile_crop_generator) + dpmodel.feature_model.compile(dpmodel.optimizer, dpmodel.loss) + filename = os.path.join(config["paths"]["checkpoints"], config["profile"]["checkpoint"]) + dpmodel.feature_model.save_weights(filename) + return filename + + +@pytest.fixture(scope="function") +def profile(config, crop_dataset): + return deepprofiler.learning.profiling.Profile(config, crop_dataset) + + +@pytest.fixture(scope="function") +def crop_generator_plugin(config): + module = importlib.import_module("plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) + importlib.invalidate_caches() + generator = module.GeneratorClass + return generator + + +@pytest.fixture(scope="function") +def val_crop_generator_plugin(config): + module = importlib.import_module("plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) + importlib.invalidate_caches() + generator = module.SingleImageGeneratorClass + return generator + + +@pytest.fixture(scope="function") +def session(): + configuration = tf.ConfigProto() + configuration.gpu_options.visible_device_list = "0" + session = tf.Session(config=configuration) + return session + + +@pytest.fixture(scope="function") +def validation(config, dataset, crop_generator, session): + return deepprofiler.learning.validation.Validation(config, dataset, crop_generator, session) From ffc97cd1cf3bcf79000524c6026c8e767154c0d0 Mon Sep 17 00:00:00 2001 From: gwaygenomics Date: Tue, 30 Jul 2019 17:43:48 -0400 Subject: [PATCH 8/8] WIP testing conftest --- .../dataset/test_image_dataset.py | 113 +++++----------- tests/deepprofiler/imaging/test_boxes.py | 81 ++---------- tests/deepprofiler/imaging/test_cropping.py | 88 +------------ tests/deepprofiler/learning/test_model.py | 120 +---------------- tests/deepprofiler/learning/test_profiling.py | 121 ++---------------- tests/deepprofiler/learning/test_training.py | 81 ------------ .../deepprofiler/learning/test_validation.py | 102 --------------- .../test_autoencoder_crop_generator.py | 57 --------- 8 files changed, 62 insertions(+), 701 deletions(-) diff --git a/tests/deepprofiler/dataset/test_image_dataset.py b/tests/deepprofiler/dataset/test_image_dataset.py index 3c035c2..2fb9d87 100644 --- a/tests/deepprofiler/dataset/test_image_dataset.py +++ b/tests/deepprofiler/dataset/test_image_dataset.py @@ -15,82 +15,33 @@ def __rand_array(): return np.array(random.sample(range(100), 12)) -@pytest.fixture(scope="function") -def out_dir(tmpdir): - return os.path.abspath(tmpdir.mkdir("test")) - -@pytest.fixture(scope="function") -def config(out_dir): - with open("tests/files/config/test.json", "r") as f: - config = json.load(f) - for path in config["paths"]: - config["paths"][path] = out_dir + config["paths"].get(path) - config["paths"]["root"] = out_dir - return config - -@pytest.fixture(scope="function") -def make_struct(config): - for key, path in config["paths"].items(): - if key not in ["index", "config_file", "root"]: - os.makedirs(path) - return - - -@pytest.fixture(scope="function") -def metadata(out_dir, make_struct, config): - filename = 
os.path.join(config["paths"]["metadata"], "index.csv") - df = pd.DataFrame({ - "Metadata_Plate": __rand_array(), - "Metadata_Well": __rand_array(), - "Metadata_Site": __rand_array(), - "R": [str(x) + ".png" for x in __rand_array()], - "G": [str(x) + ".png" for x in __rand_array()], - "B": [str(x) + ".png" for x in __rand_array()], - "Class": ["0", "1", "2", "3", "0", "1", "2", "3", "0", "1", "2", "3"], - "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] - }, dtype=int) - df.to_csv(filename, index=False) - meta = deepprofiler.dataset.metadata.Metadata(filename) - train_rule = lambda data: data["Split"].astype(int) == 0 - val_rule = lambda data: data["Split"].astype(int) == 1 - meta.splitMetadata(train_rule, val_rule) - return meta - - -@pytest.fixture(scope="function") -def dataset(metadata, config, make_struct): - keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) - return deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root"], keygen) - - -def test_init(metadata, out_dir, dataset, config, make_struct): +def test_init(imaging_metadata, out_dir, imaging_dataset, config, make_struct): sampling_field = config["train"]["sampling"]["field"] channels = config["dataset"]["images"]["channels"] keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) - dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, sampling_field, channels, out_dir, keygen) - assert dset.meta == metadata + dset = deepprofiler.dataset.image_dataset.ImageDataset(imaging_metadata, sampling_field, channels, out_dir, keygen) + assert dset.meta == imaging_metadata assert dset.sampling_field == sampling_field - np.testing.assert_array_equal(dset.sampling_values, metadata.data["Sampling"].unique()) + np.testing.assert_array_equal(dset.sampling_values, imaging_metadata.data["Sampling"].unique()) assert dset.channels == channels assert dset.root == out_dir assert dset.keyGen == keygen -def test_get_image_paths(metadata, out_dir, dataset, config, make_struct): - for idx, row in dataset.meta.data.iterrows(): - key, image, outlines = dataset.getImagePaths(row) - testKey = dataset.keyGen(row) - testImage = [dataset.root + "/" + row[ch] for ch in dataset.channels] - testOutlines = dataset.outlines +def test_get_image_paths(imaging_metadata, out_dir, imaging_dataset, config, make_struct): + for idx, row in imaging_dataset.meta.data.iterrows(): + key, image, outlines = imaging_dataset.getImagePaths(row) + testKey = imaging_dataset.keyGen(row) + testImage = [imaging_dataset.root + "/" + row[ch] for ch in imaging_dataset.channels] + testOutlines = imaging_dataset.outlines assert key == testKey assert image == testImage assert outlines == testOutlines -def test_sample_images(metadata, out_dir, dataset, config, make_struct): +def test_sample_images(imaging_metadata, out_dir, imaging_dataset, config, make_struct): n = 3 - keys, images, targets, outlines = dataset.sampleImages(dataset.sampling_values, n) + keys, images, targets, outlines = imaging_dataset.sampleImages(imaging_dataset.sampling_values, n) print(keys, images, targets, outlines) assert len(keys) == 2 * n assert len(images) == 2 * n @@ -98,14 +49,14 @@ def test_sample_images(metadata, out_dir, dataset, config, make_struct): assert len(outlines) == 2 * n -def test_get_train_batch(metadata, out_dir, dataset, config, make_struct): +def test_get_train_batch(imaging_metadata, out_dir, 
imaging_dataset, config, make_struct): images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) for i in range(0, 36, 3): - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["R"][i // 3]), images[:, :, i]) - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["G"][i // 3]), images[:, :, i + 1]) - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["B"][i // 3]), images[:, :, i + 2]) + skimage.io.imsave(os.path.join(out_dir, imaging_dataset.meta.data["R"][i // 3]), images[:, :, i]) + skimage.io.imsave(os.path.join(out_dir, imaging_dataset.meta.data["G"][i // 3]), images[:, :, i + 1]) + skimage.io.imsave(os.path.join(out_dir, imaging_dataset.meta.data["B"][i // 3]), images[:, :, i + 2]) batch_size = 3 - batch = dataset.getTrainBatch(batch_size) + batch = imaging_dataset.getTrainBatch(batch_size) assert len(batch) == batch_size for image in batch["images"]: assert image.shape == (128, 128, 3) @@ -113,12 +64,12 @@ def test_get_train_batch(metadata, out_dir, dataset, config, make_struct): assert image[:, :, i] in np.rollaxis(images, -1) -def test_scan(metadata, out_dir, dataset, config, make_struct): +def test_scan(imaging_metadata, out_dir, imaging_dataset, config, make_struct): images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) for i in range(0, 36, 3): - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["R"][i // 3]), images[:, :, i]) - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["G"][i // 3]), images[:, :, i + 1]) - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["B"][i // 3]), images[:, :, i + 2]) + skimage.io.imsave(os.path.join(out_dir, imaging_dataset.meta.data["R"][i // 3]), images[:, :, i]) + skimage.io.imsave(os.path.join(out_dir, imaging_dataset.meta.data["G"][i // 3]), images[:, :, i + 1]) + skimage.io.imsave(os.path.join(out_dir, imaging_dataset.meta.data["B"][i // 3]), images[:, :, i + 2]) data = {"index": [], "image": [], "meta": []} def func(index, image, meta): @@ -126,7 +77,7 @@ def func(index, image, meta): data["image"].append(image) data["meta"].append(meta) - dataset.scan(func, frame="all") + imaging_dataset.scan(func, frame="all") for index in data["index"]: assert index in range(12) for image in data["image"]: @@ -134,23 +85,23 @@ def func(index, image, meta): for i in range(3): assert image[:, :, i] in np.rollaxis(images, -1) for meta in data["meta"]: - assert (dataset.meta.data == meta).all(1).any() + assert (imaging_dataset.meta.data == meta).all(1).any() -def test_number_of_records(metadata, out_dir, dataset, config, make_struct): - assert dataset.number_of_records("all") == len(dataset.meta.data) - assert dataset.number_of_records("val") == len(dataset.meta.val) - assert dataset.number_of_records("train") == len(dataset.meta.train) - assert dataset.number_of_records("other") == 0 +def test_number_of_records(imaging_metadata, out_dir, imaging_dataset, config, make_struct): + assert imaging_dataset.number_of_records("all") == len(imaging_dataset.meta.data) + assert imaging_dataset.number_of_records("val") == len(imaging_dataset.meta.val) + assert imaging_dataset.number_of_records("train") == len(imaging_dataset.meta.train) + assert imaging_dataset.number_of_records("other") == 0 -def test_add_target(metadata, out_dir, dataset, config, make_struct): +def test_add_target(imaging_metadata, out_dir, imaging_dataset, config, make_struct): target = deepprofiler.dataset.target.MetadataColumnTarget("Target", random.sample(range(100), 12)) - dataset.add_target(target) - assert target in 
dataset.targets + imaging_dataset.add_target(target) + assert target in imaging_dataset.targets -def test_read_dataset(metadata, out_dir, dataset, config, make_struct): +def test_read_dataset(imaging_metadata, out_dir, imaging_dataset, config, make_struct): dset = deepprofiler.dataset.image_dataset.read_dataset(config) pd.testing.assert_frame_equal(dset.meta.data, deepprofiler.dataset.metadata.Metadata(config["paths"]["index"], dtype=None).data) assert dset.channels == config["dataset"]["images"]["channels"] diff --git a/tests/deepprofiler/imaging/test_boxes.py b/tests/deepprofiler/imaging/test_boxes.py index 854d477..37a1f8d 100644 --- a/tests/deepprofiler/imaging/test_boxes.py +++ b/tests/deepprofiler/imaging/test_boxes.py @@ -8,80 +8,14 @@ import skimage.io import deepprofiler.imaging.boxes -import deepprofiler.dataset.image_dataset -import deepprofiler.dataset.metadata -def __rand_array(): - return np.array(random.sample(range(100), 12)) - - -@pytest.fixture(scope="function") -def out_dir(tmpdir): - return os.path.abspath(tmpdir.mkdir("test")) - -@pytest.fixture(scope="function") -def config(out_dir): - with open("tests/files/config/test.json", "r") as f: - config = json.load(f) - for path in config["paths"]: - config["paths"][path] = out_dir + config["paths"].get(path) - config["paths"]["root_dir"] = out_dir - return config - -@pytest.fixture(scope="function") -def make_struct(config): - for key, path in config["paths"].items(): - if key not in ["index", "config_file", "root_dir"]: - os.makedirs(path+"/") - return - - -@pytest.fixture(scope="function") -def metadata(config, make_struct): - filename = os.path.join(config["paths"]["metadata"], "index.csv") - df = pd.DataFrame({ - "Metadata_Plate": __rand_array(), - "Metadata_Well": __rand_array(), - "Metadata_Site": __rand_array(), - "R": [str(x) + ".png" for x in __rand_array()], - "G": [str(x) + ".png" for x in __rand_array()], - "B": [str(x) + ".png" for x in __rand_array()], - "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], - "Target": [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] - }, dtype=int) - df.to_csv(filename, index=False) - meta = deepprofiler.dataset.metadata.Metadata(filename) - train_rule = lambda data: data["Split"].astype(int) == 0 - val_rule = lambda data: data["Split"].astype(int) == 1 - meta.splitMetadata(train_rule, val_rule) - return meta - - -@pytest.fixture(scope="function") -def dataset(metadata, config, make_struct): - keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) - dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen) - target = deepprofiler.dataset.target.MetadataColumnTarget("Target", metadata.data["Target"].unique()) - dset.add_target(target) - return dset - -@pytest.fixture(scope="function") -def loadbatch(dataset, metadata, out_dir, config, make_struct): - images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) - for i in range(0, 36, 3): - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["R"][i // 3]), images[:, :, i]) - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["G"][i // 3]), images[:, :, i + 1]) - skimage.io.imsave(os.path.join(out_dir, dataset.meta.data["B"][i // 3]), images[:, :, i + 2]) - result = deepprofiler.imaging.boxes.load_batch(dataset, config) - return result def test_get_locations(config, make_struct): test_image_key = "dog/cat" test_output = 
deepprofiler.imaging.boxes.get_locations(test_image_key, config) expected_output = pd.DataFrame(columns=["R_Location_Center_X", "R_Location_Center_Y"]) assert test_output.equals(expected_output) - + test_locations_path = os.path.abspath(os.path.join(config["paths"]["locations"], "dog")) os.makedirs(test_locations_path) test_file_name = "cat-R.csv" @@ -92,31 +26,32 @@ def test_get_locations(config, make_struct): assert os.path.exists(test_locations_path) == True test_output = deepprofiler.imaging.boxes.get_locations(test_image_key, config) assert test_output.equals(expected_output) - + expected_output = pd.DataFrame(index=range(10),columns=["R_Location_Center_X", "R_Location_Center_Y"]) expected_output.to_csv(test_locations_path,mode="w") expected_output=pd.read_csv(test_locations_path) test_output = deepprofiler.imaging.boxes.get_locations(test_image_key, config) assert test_output.equals(expected_output) - + expected_output = pd.DataFrame(index=range(60),columns=["R_Location_Center_X", "R_Location_Center_Y"]) expected_output.to_csv(test_locations_path,mode="w") expected_output = pd.read_csv(test_locations_path) expected_output = expected_output.sample(n=10,random_state=1414) test_output = deepprofiler.imaging.boxes.get_locations(test_image_key, config,randomize=True,seed=1414) assert test_output.equals(expected_output) - + + def test_load_batch(loadbatch): test_batch = loadbatch expected_batch_locations = 12*[pd.DataFrame(columns=["R_Location_Center_X", "R_Location_Center_Y"])] for i in range(12): assert test_batch["locations"][i].equals(expected_batch_locations[i]) - + + def test_prepare_boxes(config): test_batch = {"images": [np.random.randint(256, size=(64, 64), dtype=np.uint16)], "targets": [[1]], "locations": [pd.DataFrame(data=[[32,32]],columns=["R_Location_Center_X", "R_Location_Center_Y"])]} - test_result = deepprofiler.imaging.boxes.prepare_boxes(test_batch,config) + test_result = deepprofiler.imaging.boxes.prepare_boxes(test_batch, config) assert np.array(test_result[0]).shape == (1,4) assert np.array(test_result[1]).shape == (1,) assert np.array(test_result[2]).shape == (1,1) #ignores masks for testing - \ No newline at end of file diff --git a/tests/deepprofiler/imaging/test_cropping.py b/tests/deepprofiler/imaging/test_cropping.py index c654a67..9c5b330 100644 --- a/tests/deepprofiler/imaging/test_cropping.py +++ b/tests/deepprofiler/imaging/test_cropping.py @@ -14,82 +14,6 @@ import deepprofiler.imaging.cropping -def __rand_array(): - return np.array(random.sample(range(100), 12)) - - -@pytest.fixture(scope="function") -def out_dir(tmpdir): - return os.path.abspath(tmpdir.mkdir("test")) - -@pytest.fixture(scope="function") -def config(out_dir): - with open("tests/files/config/test.json", "r") as f: - config = json.load(f) - for path in config["paths"]: - config["paths"][path] = out_dir + config["paths"].get(path) - config["paths"]["root_dir"] = out_dir - return config - -@pytest.fixture(scope="function") -def make_struct(config): - for key, path in config["paths"].items(): - if key not in ["index", "config_file", "root_dir"]: - os.makedirs(path+"/") - return - - -@pytest.fixture(scope="function") -def metadata(config, make_struct): - filename = os.path.join(config["paths"]["metadata"], "index.csv") - df = pd.DataFrame({ - "Metadata_Plate": __rand_array(), - "Metadata_Well": __rand_array(), - "Metadata_Site": __rand_array(), - "R": [str(x) + ".png" for x in __rand_array()], - "G": [str(x) + ".png" for x in __rand_array()], - "B": [str(x) + ".png" for x in __rand_array()], - 
"Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1], - "Target": [0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2] - }, dtype=int) - df.to_csv(filename, index=False) - meta = deepprofiler.dataset.metadata.Metadata(filename) - train_rule = lambda data: data["Split"].astype(int) == 0 - val_rule = lambda data: data["Split"].astype(int) == 1 - meta.splitMetadata(train_rule, val_rule) - return meta - - -@pytest.fixture(scope="function") -def dataset(metadata, config, make_struct): - keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) - dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen) - target = deepprofiler.dataset.target.MetadataColumnTarget("Target", metadata.data["Target"].unique()) - dset.add_target(target) - return dset - -@pytest.fixture(scope="function") -def crop_generator(config, dataset): - return deepprofiler.imaging.cropping.CropGenerator(config, dataset) - - -@pytest.fixture(scope="function") -def single_image_crop_generator(config, dataset): - return deepprofiler.imaging.cropping.SingleImageCropGenerator(config, dataset) - -@pytest.fixture(scope="function") -def prepared_crop_generator(crop_generator, out_dir): - images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) - for i in range(0, 36, 3): - skimage.io.imsave(os.path.join(out_dir, crop_generator.dset.meta.data["R"][i // 3]), images[:, :, i]) - skimage.io.imsave(os.path.join(out_dir, crop_generator.dset.meta.data["G"][i // 3]), images[:, :, i + 1]) - skimage.io.imsave(os.path.join(out_dir, crop_generator.dset.meta.data["B"][i // 3]), images[:, :, i + 2]) - crop_generator.build_input_graph() - crop_generator.build_augmentation_graph() - return crop_generator - - def test_crop_graph(): num_crops = 100 channels = 3 @@ -106,10 +30,10 @@ def test_crop_graph(): sess.close() -def test_crop_generator_init(config, dataset): - generator = deepprofiler.imaging.cropping.CropGenerator(config, dataset) +def test_crop_generator_init(config, crop_dataset): + generator = deepprofiler.imaging.cropping.CropGenerator(config, crop_dataset) assert generator.config == config - assert generator.dset == dataset + assert generator.dset == crop_dataset def test_crop_generator_build_input_graph(crop_generator): @@ -190,10 +114,10 @@ def test_crop_generator_stop(prepared_crop_generator): assert prepared_crop_generator.coord.joined -def test_single_image_crop_generator_init(config, dataset): - generator = deepprofiler.imaging.cropping.SingleImageCropGenerator(config, dataset) +def test_single_image_crop_generator_init(config, crop_dataset): + generator = deepprofiler.imaging.cropping.SingleImageCropGenerator(config, crop_dataset) assert generator.config == config - assert generator.dset == dataset + assert generator.dset == crop_dataset def test_single_image_crop_generator_start(single_image_crop_generator): diff --git a/tests/deepprofiler/learning/test_model.py b/tests/deepprofiler/learning/test_model.py index 7ce836f..50f0a59 100644 --- a/tests/deepprofiler/learning/test_model.py +++ b/tests/deepprofiler/learning/test_model.py @@ -9,126 +9,18 @@ import skimage.io import deepprofiler.dataset.target -import deepprofiler.dataset.metadata import deepprofiler.dataset.image_dataset import deepprofiler.imaging.cropping from deepprofiler.learning.model import DeepProfilerModel -def __rand_array(): - return np.array(random.sample(range(100), 12)) - - -@pytest.fixture(scope="function") -def 
out_dir(tmpdir): - return os.path.abspath(tmpdir.mkdir("test")) - - -@pytest.fixture(scope="function") -def config(out_dir): - with open("tests/files/config/test.json", "r") as f: - config = json.load(f) - for path in config["paths"]: - config["paths"][path] = out_dir + config["paths"].get(path) - config["paths"]["root_dir"] = out_dir - return config - -@pytest.fixture(scope="function") -def make_struct(config): - for key, path in config["paths"].items(): - if key not in ["index", "config_file", "root_dir"]: - os.makedirs(path+"/") - return - -@pytest.fixture(scope="function") -def metadata(out_dir, make_struct): - filename = os.path.join(out_dir, "index.csv") - df = pd.DataFrame({ - "Metadata_Plate": __rand_array(), - "Metadata_Well": __rand_array(), - "Metadata_Site": __rand_array(), - "R": [str(x) + ".png" for x in __rand_array()], - "G": [str(x) + ".png" for x in __rand_array()], - "B": [str(x) + ".png" for x in __rand_array()], - "Class": ["0", "1", "2", "3", "0", "1", "2", "3", "0", "1", "2", "3"], - "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] - }, dtype=int) - df.to_csv(filename, index=False) - meta = deepprofiler.dataset.metadata.Metadata(filename) - train_rule = lambda data: data["Split"].astype(int) == 0 - val_rule = lambda data: data["Split"].astype(int) == 1 - meta.splitMetadata(train_rule, val_rule) - return meta - - -@pytest.fixture(scope="function") -def dataset(metadata, out_dir, config, make_struct): - keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"]) - dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen) - target = deepprofiler.dataset.target.MetadataColumnTarget("Class", metadata.data["Class"].unique()) - dset.add_target(target) - return dset - - -@pytest.fixture(scope="function") -def data(metadata, out_dir, config, make_struct): - images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8) - for i in range(0, 36, 3): - skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["R"][i // 3]), images[:, :, i]) - skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["G"][i // 3]), images[:, :, i + 1]) - skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["B"][i // 3]), images[:, :, i + 2]) - - -@pytest.fixture(scope="function") -def locations(out_dir, metadata, config, make_struct): - for i in range(len(metadata.data.index)): - meta = metadata.data.iloc[i] - path = os.path.abspath(os.path.join(config["paths"]["locations"], meta["Metadata_Plate"])) - os.makedirs(path, exist_ok=True) - path = os.path.abspath(os.path.join(path, "{}-{}-{}.csv".format(meta["Metadata_Well"], - meta["Metadata_Site"], - config["train"]["sampling"]["locations_field"]))) - locs = pd.DataFrame({ - "R_Location_Center_X": np.random.randint(0, 128, (config["train"]["sampling"]["locations"])), - "R_Location_Center_Y": np.random.randint(0, 128, (config["train"]["sampling"]["locations"])) - }) - locs.to_csv(path, index=False) - - -@pytest.fixture(scope="function") -def crop_generator(config): - module = importlib.import_module("plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) - importlib.invalidate_caches() - generator = module.GeneratorClass - return generator - - -@pytest.fixture(scope="function") -def val_crop_generator(config): - module = 
importlib.import_module("plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) - importlib.invalidate_caches() - generator = module.SingleImageGeneratorClass - return generator - - -@pytest.fixture(scope="function") -def model(config, dataset, crop_generator, val_crop_generator): - def create(): - module = importlib.import_module("plugins.models.{}".format(config["train"]["model"]["name"])) - importlib.invalidate_caches() - dpmodel = module.ModelClass(config, dataset, crop_generator, val_crop_generator) - return dpmodel - return create - - -def test_init(config, dataset, crop_generator, val_crop_generator): - dpmodel = DeepProfilerModel(config, dataset, crop_generator, val_crop_generator) +def test_init(config, model_dataset, crop_generator_plugin, val_crop_generator_plugin): + dpmodel = DeepProfilerModel(config, model_dataset, crop_generator_plugin, val_crop_generator_plugin) assert dpmodel.feature_model is None assert dpmodel.config == config - assert dpmodel.dset == dataset - assert isinstance(dpmodel.train_crop_generator, crop_generator) - assert isinstance(dpmodel.val_crop_generator, val_crop_generator) + assert dpmodel.dset == model_dataset + assert isinstance(dpmodel.train_crop_generator, crop_generator_plugin) + assert isinstance(dpmodel.val_crop_generator, val_crop_generator_plugin) assert dpmodel.random_seed is None @@ -139,7 +31,7 @@ def test_seed(model): assert model1.random_seed == seed -def test_train(model, out_dir, data, locations, make_struct, config): +def test_train(model, out_dir, make_struct, config): model1 = model() model1.train() assert os.path.exists(os.path.join(config["paths"]["checkpoints"], "checkpoint_0001.hdf5")) diff --git a/tests/deepprofiler/learning/test_profiling.py b/tests/deepprofiler/learning/test_profiling.py index b9a3e9b..70ae22d 100644 --- a/tests/deepprofiler/learning/test_profiling.py +++ b/tests/deepprofiler/learning/test_profiling.py @@ -18,112 +18,11 @@ import skimage.io -def __rand_array(): - return np.array(random.sample(range(100), 12)) - - -@pytest.fixture(scope="function") -def out_dir(tmpdir): - return os.path.abspath(tmpdir.mkdir("test")) - - -@pytest.fixture(scope="function") -def config(out_dir): - with open("tests/files/config/test.json", "r") as f: - config = json.load(f) - for path in config["paths"]: - config["paths"][path] = out_dir + config["paths"].get(path) - config["paths"]["root_dir"] = out_dir - return config - -@pytest.fixture(scope="function") -def make_struct(config): - for key, path in config["paths"].items(): - if key not in ["index", "config_file", "root_dir"]: - os.makedirs(path+"/") - return - -@pytest.fixture(scope="function") -def metadata(out_dir, make_struct): - filename = os.path.join(out_dir, "index.csv") - df = pd.DataFrame({ - "Metadata_Plate": __rand_array(), - "Metadata_Well": __rand_array(), - "Metadata_Site": __rand_array(), - "R": [str(x) + ".png" for x in __rand_array()], - "G": [str(x) + ".png" for x in __rand_array()], - "B": [str(x) + ".png" for x in __rand_array()], - "Class": ["0", "1", "2", "3", "0", "1", "2", "3", "0", "1", "2", "3"], - "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1], - "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1] - }, dtype=int) - df.to_csv(filename, index=False) - meta = deepprofiler.dataset.metadata.Metadata(filename) - train_rule = lambda data: data["Split"].astype(int) == 0 - val_rule = lambda data: data["Split"].astype(int) == 1 - meta.splitMetadata(train_rule, val_rule) - return meta - - -@pytest.fixture(scope="function") -def 
dataset(metadata, out_dir, config, make_struct):
-    keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"])
-    dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen)
-    target = deepprofiler.dataset.target.MetadataColumnTarget("Class", metadata.data["Class"].unique())
-    dset.add_target(target)
-    return dset
-
-
-@pytest.fixture(scope="function")
-def data(metadata, out_dir, config, make_struct):
-    images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8)
-    for i in range(0, 36, 3):
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["R"][i // 3]), images[:, :, i])
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["G"][i // 3]), images[:, :, i + 1])
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["B"][i // 3]), images[:, :, i + 2])
-
-
-@pytest.fixture(scope="function")
-def locations(out_dir, metadata, config, make_struct):
-    for i in range(len(metadata.data.index)):
-        meta = metadata.data.iloc[i]
-        path = os.path.abspath(os.path.join(config["paths"]["locations"], meta["Metadata_Plate"]))
-        os.makedirs(path, exist_ok=True)
-        path = os.path.abspath(os.path.join(path, "{}-{}-{}.csv".format(meta["Metadata_Well"],
-                                                                        meta["Metadata_Site"],
-                                                                        config["train"]["sampling"]["locations_field"])))
-        locs = pd.DataFrame({
-            "R_Location_Center_X": np.random.randint(0, 128, (config["train"]["sampling"]["locations"])),
-            "R_Location_Center_Y": np.random.randint(0, 128, (config["train"]["sampling"]["locations"]))
-        })
-        locs.to_csv(path, index=False)
-
-@pytest.fixture(scope="function")
-def checkpoint(config, dataset):
-    crop_generator = importlib.import_module(
-        "plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) \
-        .GeneratorClass
-    profile_crop_generator = importlib.import_module(
-        "plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])) \
-        .SingleImageGeneratorClass
-    dpmodel = importlib.import_module("plugins.models.{}".format(config["train"]["model"]["name"])) \
-        .ModelClass(config, dataset, crop_generator, profile_crop_generator)
-    dpmodel.feature_model.compile(dpmodel.optimizer, dpmodel.loss)
-    filename = os.path.join(config["paths"]["checkpoints"], config["profile"]["checkpoint"])
-    dpmodel.feature_model.save_weights(filename)
-    return filename
-
-
-@pytest.fixture(scope="function")
-def profile(config, dataset):
-    return deepprofiler.learning.profiling.Profile(config, dataset)
-
-
-def test_init(config, dataset):
-    prof = deepprofiler.learning.profiling.Profile(config, dataset)
+def test_init(config, model_dataset):
+    prof = deepprofiler.learning.profiling.Profile(config, model_dataset)
     test_num_channels = len(config["dataset"]["images"]["channels"])
     assert prof.config == config
-    assert prof.dset == dataset
+    assert prof.dset == model_dataset
     assert prof.num_channels == test_num_channels
     assert prof.crop_generator == importlib.import_module(
         "plugins.crop_generators.{}".format(config["train"]["model"]["crop_generator"])).GeneratorClass
@@ -138,12 +37,12 @@ def test_configure(profile, checkpoint):
     assert isinstance(profile.sess, tf.Session)
 
 
-def test_check(profile, metadata):
-    assert profile.check(metadata.data)  # TODO: test false positive
+def test_check(profile, imaging_metadata):
+    assert profile.check(imaging_metadata.data)  # TODO: test false positive
 
 
-def test_extract_features(profile, metadata, locations, checkpoint):
-    meta = metadata.data.iloc[0]
+def test_extract_features(profile, imaging_metadata, locations, checkpoint):
+    meta = imaging_metadata.data.iloc[0]
     image = np.random.randint(0, 256, (128, 128, 3), dtype=np.uint8)
     profile.configure()
     profile.extract_features(None, image, meta)
@@ -152,9 +51,9 @@ def test_extract_features(profile, metadata, locations, checkpoint):
     assert os.path.isfile(output_file)
 
 
-def test_profile(config, dataset, data, locations, checkpoint):
-    deepprofiler.learning.profiling.profile(config, dataset)
-    for index, row in dataset.meta.data.iterrows():
+def test_profile(config, model_dataset, model_data, locations, checkpoint):
+    deepprofiler.learning.profiling.profile(config, model_dataset)
+    for index, row in model_dataset.meta.data.iterrows():
         output_file = config["paths"]["features"] + "/{}_{}_{}.npz" \
             .format(row["Metadata_Plate"], row["Metadata_Well"], row["Metadata_Site"])
         assert os.path.isfile(output_file)
diff --git a/tests/deepprofiler/learning/test_training.py b/tests/deepprofiler/learning/test_training.py
index e44f7e5..1268f70 100644
--- a/tests/deepprofiler/learning/test_training.py
+++ b/tests/deepprofiler/learning/test_training.py
@@ -14,87 +14,6 @@
 import deepprofiler.dataset.target
 
 
-def __rand_array():
-    return np.array(random.sample(range(100), 12))
-
-
-@pytest.fixture(scope="function")
-def out_dir(tmpdir):
-    return os.path.abspath(tmpdir.mkdir("test"))
-
-
-@pytest.fixture(scope="function")
-def config(out_dir):
-    with open("tests/files/config/test.json", "r") as f:
-        config = json.load(f)
-    for path in config["paths"]:
-        config["paths"][path] = out_dir + config["paths"].get(path)
-    config["paths"]["root_dir"] = out_dir
-    return config
-
-@pytest.fixture(scope="function")
-def make_struct(config):
-    for key, path in config["paths"].items():
-        if key not in ["index", "config_file", "root_dir"]:
-            os.makedirs(path+"/")
-    return
-
-@pytest.fixture(scope="function")
-def metadata(out_dir, make_struct):
-    filename = os.path.join(out_dir, "index.csv")
-    df = pd.DataFrame({
-        "Metadata_Plate": __rand_array(),
-        "Metadata_Well": __rand_array(),
-        "Metadata_Site": __rand_array(),
-        "R": [str(x) + ".png" for x in __rand_array()],
-        "G": [str(x) + ".png" for x in __rand_array()],
-        "B": [str(x) + ".png" for x in __rand_array()],
-        "Class": ["0", "1", "2", "3", "0", "1", "2", "3", "0", "1", "2", "3"],
-        "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
-        "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
-    }, dtype=int)
-    df.to_csv(filename, index=False)
-    meta = deepprofiler.dataset.metadata.Metadata(filename)
-    train_rule = lambda data: data["Split"].astype(int) == 0
-    val_rule = lambda data: data["Split"].astype(int) == 1
-    meta.splitMetadata(train_rule, val_rule)
-    return meta
-
-
-@pytest.fixture(scope="function")
-def dataset(metadata, out_dir, config, make_struct):
-    keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"])
-    dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen)
-    target = deepprofiler.dataset.target.MetadataColumnTarget("Class", metadata.data["Class"].unique())
-    dset.add_target(target)
-    return dset
-
-
-@pytest.fixture(scope="function")
-def data(metadata, out_dir, config, make_struct):
-    images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8)
-    for i in range(0, 36, 3):
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["R"][i // 3]), images[:, :, i])
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["G"][i // 3]), images[:, :, i + 1])
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["B"][i // 3]), images[:, :, i + 2])
-
-
-@pytest.fixture(scope="function")
-def locations(out_dir, metadata, config, make_struct):
-    for i in range(len(metadata.data.index)):
-        meta = metadata.data.iloc[i]
-        path = os.path.abspath(os.path.join(config["paths"]["locations"], meta["Metadata_Plate"]))
-        os.makedirs(path, exist_ok=True)
-        path = os.path.abspath(os.path.join(path, "{}-{}-{}.csv".format(meta["Metadata_Well"],
-                                                                        meta["Metadata_Site"],
-                                                                        config["train"]["sampling"]["locations_field"])))
-        locs = pd.DataFrame({
-            "R_Location_Center_X": np.random.randint(0, 128, (config["train"]["sampling"]["locations"])),
-            "R_Location_Center_Y": np.random.randint(0, 128, (config["train"]["sampling"]["locations"]))
-        })
-        locs.to_csv(path, index=False)
-
-
 def test_learn_model(config, dataset, data, locations, out_dir, make_struct):
     epoch = 1
     verbose = 1
diff --git a/tests/deepprofiler/learning/test_validation.py b/tests/deepprofiler/learning/test_validation.py
index 6323b3b..381b538 100644
--- a/tests/deepprofiler/learning/test_validation.py
+++ b/tests/deepprofiler/learning/test_validation.py
@@ -16,108 +16,6 @@
 import deepprofiler.learning.validation
 
 
-def __rand_array():
-    return np.array(random.sample(range(100), 12))
-
-
-@pytest.fixture(scope="function")
-def out_dir(tmpdir):
-    return os.path.abspath(tmpdir.mkdir("test"))
-
-
-@pytest.fixture(scope="function")
-def config(out_dir):
-    with open("tests/files/config/test.json", "r") as f:
-        config = json.load(f)
-    for path in config["paths"]:
-        config["paths"][path] = out_dir + config["paths"].get(path)
-    config["paths"]["root_dir"] = out_dir
-    return config
-
-@pytest.fixture(scope="function")
-def make_struct(config):
-    for key, path in config["paths"].items():
-        if key not in ["index", "config_file", "root_dir"]:
-            os.makedirs(path+"/")
-    return
-
-
-@pytest.fixture(scope="function")
-def metadata(out_dir, make_struct):
-    filename = os.path.join(out_dir, "index.csv")
-    df = pd.DataFrame({
-        "Metadata_Plate": __rand_array(),
-        "Metadata_Well": __rand_array(),
-        "Metadata_Site": __rand_array(),
-        "R": [str(x) + ".png" for x in __rand_array()],
-        "G": [str(x) + ".png" for x in __rand_array()],
-        "B": [str(x) + ".png" for x in __rand_array()],
-        "Class": ["0", "1", "2", "3", "0", "1", "2", "3", "0", "1", "2", "3"],
-        "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
-        "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
-    }, dtype=int)
-    df.to_csv(filename, index=False)
-    meta = deepprofiler.dataset.metadata.Metadata(filename)
-    train_rule = lambda data: data["Split"].astype(int) == 0
-    val_rule = lambda data: data["Split"].astype(int) == 1
-    meta.splitMetadata(train_rule, val_rule)
-    return meta
-
-
-@pytest.fixture(scope="function")
-def dataset(metadata, out_dir, config, make_struct):
-    keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"])
-    dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen)
-    target = deepprofiler.dataset.target.MetadataColumnTarget("Class", metadata.data["Class"].unique())
-    dset.add_target(target)
-    return dset
-
-
-@pytest.fixture(scope="function")
-def data(metadata, out_dir, config, make_struct):
-    images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8)
-    for i in range(0, 36, 3):
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["R"][i // 3]), images[:, :, i])
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["G"][i // 3]), images[:, :, i + 1])
-        skimage.io.imsave(os.path.join(config["paths"]["root_dir"], metadata.data["B"][i // 3]), images[:, :, i + 2])
-
-
-@pytest.fixture(scope="function")
-def locations(out_dir, metadata, config, make_struct):
-    for i in range(len(metadata.data.index)):
-        meta = metadata.data.iloc[i]
-        path = os.path.abspath(os.path.join(config["paths"]["locations"], meta["Metadata_Plate"]))
-        os.makedirs(path, exist_ok=True)
-        path = os.path.abspath(os.path.join(path, "{}-{}-{}.csv".format(meta["Metadata_Well"],
-                                                                        meta["Metadata_Site"],
-                                                                        config["train"]["sampling"]["locations_field"])))
-        locs = pd.DataFrame({
-            "R_Location_Center_X": np.random.randint(0, 128, (config["train"]["sampling"]["locations"])),
-            "R_Location_Center_Y": np.random.randint(0, 128, (config["train"]["sampling"]["locations"]))
-        })
-        locs.to_csv(path, index=False)
-
-
-@pytest.fixture(scope="function")
-def session():
-    configuration = tf.ConfigProto()
-    configuration.gpu_options.visible_device_list = "0"
-    session = tf.Session(config = configuration)
-    return session
-
-
-@pytest.fixture(scope="function")
-def crop_generator(config, dataset, session):
-    crop_generator = deepprofiler.imaging.cropping.SingleImageCropGenerator(config, dataset)
-    crop_generator.start(session)
-    return crop_generator
-
-
-@pytest.fixture(scope="function")
-def validation(config, dataset, crop_generator, session):
-    return deepprofiler.learning.validation.Validation(config, dataset, crop_generator, session)
-
-
 def test_init(config, dataset, crop_generator, session, validation):
     validation = validation
     assert validation.config == config
diff --git a/tests/plugins/crop_generators/test_autoencoder_crop_generator.py b/tests/plugins/crop_generators/test_autoencoder_crop_generator.py
index ee3349e..e8826e3 100644
--- a/tests/plugins/crop_generators/test_autoencoder_crop_generator.py
+++ b/tests/plugins/crop_generators/test_autoencoder_crop_generator.py
@@ -15,63 +15,6 @@
 import plugins.crop_generators.autoencoder_crop_generator
 
 
-def __rand_array():
-    return np.array(random.sample(range(100), 12))
-
-
-@pytest.fixture(scope="function")
-def out_dir(tmpdir):
-    return os.path.abspath(tmpdir.mkdir("test"))
-
-
-@pytest.fixture(scope="function")
-def config(out_dir):
-    with open("tests/files/config/test.json", "r") as f:
-        config = json.load(f)
-    for path in config["paths"]:
-        config["paths"][path] = out_dir + config["paths"].get(path)
-    config["paths"]["root_dir"] = out_dir
-    return config
-
-@pytest.fixture(scope="function")
-def make_struct(config):
-    for key, path in config["paths"].items():
-        if key not in ["index", "config_file", "root_dir"]:
-            os.makedirs(path+"/")
-    return
-
-
-@pytest.fixture(scope="function")
-def metadata(out_dir, make_struct):
-    filename = os.path.join(out_dir, "index.csv")
-    df = pd.DataFrame({
-        "Metadata_Plate": __rand_array(),
-        "Metadata_Well": __rand_array(),
-        "Metadata_Site": __rand_array(),
-        "R": [str(x) + ".png" for x in __rand_array()],
-        "G": [str(x) + ".png" for x in __rand_array()],
-        "B": [str(x) + ".png" for x in __rand_array()],
-        "Class": ["0", "1", "2", "3", "0", "1", "2", "3", "0", "1", "2", "3"],
-        "Sampling": [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1],
-        "Split": [0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1]
-    }, dtype=int)
-    df.to_csv(filename, index=False)
-    meta = deepprofiler.dataset.metadata.Metadata(filename)
-    train_rule = lambda data: data["Split"].astype(int) == 0
-    val_rule = lambda data: data["Split"].astype(int) == 1
-    meta.splitMetadata(train_rule, val_rule)
-    return meta
-
-
-@pytest.fixture(scope="function")
-def dataset(metadata, out_dir, config, make_struct):
-    keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"])
-    dset = deepprofiler.dataset.image_dataset.ImageDataset(metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen)
-    target = deepprofiler.dataset.target.MetadataColumnTarget("Class", metadata.data["Class"].unique())
-    dset.add_target(target)
-    return dset
-
-
 def test_autoencoder_crop_generator():
     assert issubclass(plugins.crop_generators.autoencoder_crop_generator.GeneratorClass, deepprofiler.imaging.cropping.CropGenerator)
     assert issubclass(plugins.crop_generators.autoencoder_crop_generator.SingleImageGeneratorClass, deepprofiler.imaging.cropping.SingleImageCropGenerator)
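Note: the fixture blocks deleted above were near-identical copies maintained separately in each test module. The renamed fixtures the rewritten tests now request (imaging_metadata, model_dataset, model_data) are not defined anywhere in this patch, which suggests they were consolidated into a shared pytest conftest.py elsewhere in this series. The sketch below is illustrative only: the path tests/conftest.py, the fixture renames, and the exact bodies are assumptions reconstructed from the deleted code, not part of this patch.

# tests/conftest.py -- hypothetical consolidation target for the fixtures
# removed above; fixture names follow the new test signatures.
import json
import os
import random

import numpy as np
import pandas as pd
import pytest
import skimage.io

import deepprofiler.dataset.image_dataset
import deepprofiler.dataset.metadata
import deepprofiler.dataset.target


def __rand_array():
    return np.array(random.sample(range(100), 12))


@pytest.fixture(scope="function")
def out_dir(tmpdir):
    return os.path.abspath(tmpdir.mkdir("test"))


@pytest.fixture(scope="function")
def config(out_dir):
    with open("tests/files/config/test.json", "r") as f:
        config = json.load(f)
    for path in config["paths"]:
        # the deleted fixtures concatenated strings; later patches in this
        # series favor os.path.join for the same rebasing
        config["paths"][path] = out_dir + config["paths"].get(path)
    config["paths"]["root_dir"] = out_dir
    return config


@pytest.fixture(scope="function")
def make_struct(config):
    # create every configured directory except file-valued entries;
    # exist_ok added for safety (the deleted fixtures used os.makedirs(path + "/"))
    for key, path in config["paths"].items():
        if key not in ["index", "config_file", "root_dir"]:
            os.makedirs(path, exist_ok=True)


@pytest.fixture(scope="function")
def imaging_metadata(out_dir, make_struct):
    # same body as the per-module "metadata" fixture removed above
    # (the dtype=int coercion is omitted; values are written to CSV and re-read)
    filename = os.path.join(out_dir, "index.csv")
    df = pd.DataFrame({
        "Metadata_Plate": __rand_array(),
        "Metadata_Well": __rand_array(),
        "Metadata_Site": __rand_array(),
        "R": [str(x) + ".png" for x in __rand_array()],
        "G": [str(x) + ".png" for x in __rand_array()],
        "B": [str(x) + ".png" for x in __rand_array()],
        "Class": ["0", "1", "2", "3"] * 3,
        "Sampling": [0, 1] * 6,
        "Split": [0] * 6 + [1] * 6,
    })
    df.to_csv(filename, index=False)
    meta = deepprofiler.dataset.metadata.Metadata(filename)
    meta.splitMetadata(lambda data: data["Split"].astype(int) == 0,
                       lambda data: data["Split"].astype(int) == 1)
    return meta


@pytest.fixture(scope="function")
def model_dataset(imaging_metadata, config, make_struct):
    # the per-module "dataset" fixture, renamed and rewired to imaging_metadata
    keygen = lambda r: "{}/{}-{}".format(r["Metadata_Plate"], r["Metadata_Well"], r["Metadata_Site"])
    dset = deepprofiler.dataset.image_dataset.ImageDataset(
        imaging_metadata, "Sampling", ["R", "G", "B"], config["paths"]["root_dir"], keygen)
    target = deepprofiler.dataset.target.MetadataColumnTarget(
        "Class", imaging_metadata.data["Class"].unique())
    dset.add_target(target)
    return dset


@pytest.fixture(scope="function")
def model_data(imaging_metadata, config, make_struct):
    # the per-module "data" fixture: writes one random 128x128 PNG per channel
    images = np.random.randint(0, 256, (128, 128, 36), dtype=np.uint8)
    for i in range(0, 36, 3):
        for j, channel in enumerate(["R", "G", "B"]):
            skimage.io.imsave(
                os.path.join(config["paths"]["root_dir"], imaging_metadata.data[channel][i // 3]),
                images[:, :, i + j])

Because pytest automatically injects fixtures defined in a conftest.py into every test module below it, the duplicated blocks can be deleted wholesale without any import changes, which is exactly the shape of the edits in this patch.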