Commit
Use bioimageio objects instead of mocking for testing
- On non-Unix systems, new processes can be launched with `spawn` instead of `fork`. Unlike `fork`, `spawn` does not copy the parent process's memory into the child; every argument handed to the child is pickled instead. Mock objects cannot be serialized, so the tests were failing (see the first sketch below).
- The `PredictionPipeline` object used to be created in the main process before the child process was started, serialized to transfer it to the child, and deserialized there again. This round trip is redundant: the `PredictionPipeline` is constructed from a bytes value in the first place, so the parent and the child process now each build the same `PredictionPipeline` directly from those bytes (see the second sketch below).
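
As a rough illustration of the first point, here is a minimal sketch (not tiktorch code): under the `spawn` start method every argument sent to the child process must be pickled, and a mock does not survive that step, while plain bytes do.

```python
import multiprocessing as mp
import pickle
from unittest.mock import MagicMock


def child(payload):
    # Whatever reaches the child had to be pickled by the parent first.
    print("child received", type(payload).__name__)


if __name__ == "__main__":
    ctx = mp.get_context("spawn")  # default start method on Windows and macOS

    # Plain, picklable data crosses the process boundary without trouble.
    p = ctx.Process(target=child, args=(b"packaged model bytes",))
    p.start()
    p.join()

    # A mock does not: pickling it fails, so it can never be handed to the child.
    try:
        pickle.dumps(MagicMock())
    except Exception as err:
        print("mock could not be pickled:", err)
```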
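
And a rough sketch of the second point, assuming a hypothetical `run_session` entry point, a placeholder model path, and a temporary-file loading step (in the repository this role is played by `_get_prediction_pipeline_from_model_bytes` in `tiktorch.server.session.process`): the parent passes the packaged model as bytes, and the child rebuilds the `PredictionPipeline` itself, so no pipeline object ever has to be pickled.

```python
import multiprocessing as mp
import tempfile

from bioimageio.core import create_prediction_pipeline
from bioimageio.spec import load_description


def run_session(model_bytes: bytes) -> None:
    # The child receives raw package bytes and constructs its own pipeline.
    with tempfile.NamedTemporaryFile(suffix=".zip", delete=False) as f:
        f.write(model_bytes)
    model_descr = load_description(f.name)
    pipeline = create_prediction_pipeline(model_descr)
    # ... serve prediction requests with `pipeline` ...


if __name__ == "__main__":
    ctx = mp.get_context("spawn")
    with open("path/to/packaged_model.zip", "rb") as f:  # placeholder path
        model_bytes = f.read()
    proc = ctx.Process(target=run_session, args=(model_bytes,))
    proc.start()
    proc.join()
```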
thodkatz committed Aug 29, 2024
1 parent 629893b commit d84c70f
Showing 7 changed files with 216 additions and 140 deletions.
2 changes: 1 addition & 1 deletion setup.py
@@ -27,7 +27,7 @@
],
packages=find_packages(exclude=["tests"]), # Required
install_requires=[
"bioimageio.spec==0.5.3.post4",
"bioimageio.spec==0.5.3.2",
"bioimageio.core==0.6.7",
"grpcio>=1.31",
"numpy",
257 changes: 162 additions & 95 deletions tests/conftest.py
@@ -1,22 +1,45 @@
import faulthandler
import io
import logging.handlers
import multiprocessing as mp
import signal
import sys
import tempfile
import threading
from os import getenv
from pathlib import Path
from random import randint
from typing import Generator, List, Tuple
from unittest.mock import create_autospec, patch
from typing import List, Tuple

import numpy as np
import pytest
import torch
import xarray as xr
from bioimageio.core import AxisId, PredictionPipeline, Sample, Tensor
from bioimageio.spec.model import v0_5
from bioimageio.spec.model.v0_5 import TensorId

from tiktorch.server.session import process
from bioimageio.core import AxisId
from bioimageio.spec import save_bioimageio_package_to_stream
from bioimageio.spec.model.v0_5 import (
ArchitectureFromLibraryDescr,
Author,
BatchAxis,
ChannelAxis,
CiteEntry,
Doi,
FileDescr,
HttpUrl,
InputAxis,
InputTensorDescr,
LicenseId,
ModelDescr,
OutputTensorDescr,
ParameterizedSize,
PytorchStateDictWeightsDescr,
SizeReference,
SpaceInputAxis,
SpaceOutputAxis,
Version,
WeightsDescr,
)
from torch import nn


@pytest.fixture
Expand Down Expand Up @@ -65,115 +88,159 @@ def assert_threads_cleanup():
pytest.fail("Threads still running:\n\t%s" % "\n\t".join(running_threads))


MockedPredictionPipeline = Generator[Tuple[PredictionPipeline, Sample], None, None]


def patched_prediction_pipeline(mocked_prediction_pipeline: PredictionPipeline):
return patch.object(process, "_get_prediction_pipeline_from_model_bytes", lambda *args: mocked_prediction_pipeline)


@pytest.fixture
def bioimage_model_explicit_siso() -> MockedPredictionPipeline:
mocked_prediction_pipeline, mocked_output_sample = _bioimage_model_siso(
def bioimage_model_explicit_siso() -> Tuple[io.BytesIO, xr.DataArray]:
test_tensor = np.arange(1 * 2 * 10 * 10, dtype="float32").reshape(1, 2, 10, 10)
model_descr, expected_output = _bioimage_model_siso(
[
v0_5.BatchAxis(),
v0_5.ChannelAxis(channel_names=["channel1", "channel2"]),
v0_5.SpaceInputAxis(id="x", size=10),
v0_5.SpaceInputAxis(id="y", size=10),
]
BatchAxis(),
ChannelAxis(channel_names=["channel1", "channel2"]),
SpaceInputAxis(id="x", size=10),
SpaceInputAxis(id="y", size=10),
],
test_tensor,
)
with patched_prediction_pipeline(mocked_prediction_pipeline):
yield mocked_prediction_pipeline, mocked_output_sample
model_bytes = io.BytesIO()
save_bioimageio_package_to_stream(model_descr, output_stream=model_bytes)
return model_bytes, expected_output


@pytest.fixture
def bioimage_model_param_siso() -> MockedPredictionPipeline:
mocked_prediction_pipeline, mocked_output_sample = _bioimage_model_siso(
def bioimage_model_param_siso() -> Tuple[io.BytesIO, xr.DataArray]:
test_tensor = np.arange(1 * 2 * 10 * 20, dtype="float32").reshape(1, 2, 10, 20)
model_descr, expected_output = _bioimage_model_siso(
[
v0_5.BatchAxis(),
v0_5.ChannelAxis(channel_names=["channel1", "channel2"]),
v0_5.SpaceInputAxis(id="x", size=v0_5.ParameterizedSize(min=10, step=2)),
v0_5.SpaceInputAxis(id="y", size=v0_5.ParameterizedSize(min=20, step=3)),
]
BatchAxis(),
ChannelAxis(channel_names=["channel1", "channel2"]),
SpaceInputAxis(id="x", size=ParameterizedSize(min=10, step=2)),
SpaceInputAxis(id="y", size=ParameterizedSize(min=20, step=3)),
],
test_tensor,
)
with patched_prediction_pipeline(mocked_prediction_pipeline):
yield mocked_prediction_pipeline, mocked_output_sample
model_bytes = io.BytesIO()
save_bioimageio_package_to_stream(model_descr, output_stream=model_bytes)
return model_bytes, expected_output


def _bioimage_model_siso(input_axes: List[v0_5.InputAxis]) -> Tuple[PredictionPipeline, Sample]:
def _bioimage_model_siso(input_axes: List[InputAxis], test_tensor: np.array) -> Tuple[ModelDescr, xr.DataArray]:
"""
Mocked bioimageio prediction pipeline with single input single output
"""
with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as test_tensor_file:
np.save(test_tensor_file.name, test_tensor)

mocked_input = create_autospec(v0_5.InputTensorDescr)
mocked_input.id = "input"
mocked_input.axes = input_axes
return _bioimage_model([mocked_input])
input_tensor = InputTensorDescr(
id="input", axes=input_axes, description="", test_tensor=FileDescr(source=Path(test_tensor_file.name))
)
return _bioimage_model([input_tensor])


@pytest.fixture
def bioimage_model_miso() -> MockedPredictionPipeline:
def bioimage_model_miso() -> Tuple[io.BytesIO, xr.DataArray]:
"""
Mocked bioimageio prediction pipeline with three inputs single output
"""
test_tensor1 = np.arange(1 * 2 * 10 * 10, dtype="float32").reshape(1, 2, 10, 10)
test_tensor2 = np.arange(1 * 2 * 10 * 10, dtype="float32").reshape(1, 2, 10, 10)
test_tensor3 = np.arange(1 * 2 * 10 * 10, dtype="float32").reshape(1, 2, 10, 10)

with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as test_tensor1_file:
np.save(test_tensor1_file.name, test_tensor1)
with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as test_tensor2_file:
np.save(test_tensor2_file.name, test_tensor2)
with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as test_tensor3_file:
np.save(test_tensor3_file.name, test_tensor3)

input1 = InputTensorDescr(
id="input1",
axes=[
BatchAxis(),
ChannelAxis(channel_names=["channel1", "channel2"]),
SpaceInputAxis(id=AxisId("x"), size=10),
SpaceInputAxis(id=AxisId("y"), size=SizeReference(tensor_id="input3", axis_id="y")),
],
description="",
test_tensor=FileDescr(source=Path(test_tensor1_file.name)),
)

input2 = InputTensorDescr(
id="input2",
axes=[
BatchAxis(),
ChannelAxis(channel_names=["channel1", "channel2"]),
SpaceInputAxis(id=AxisId("x"), size=ParameterizedSize(min=10, step=2)),
SpaceInputAxis(id=AxisId("y"), size=ParameterizedSize(min=10, step=5)),
],
description="",
test_tensor=FileDescr(source=Path(test_tensor1_file.name)),
)

input3 = InputTensorDescr(
id="input3",
axes=[
BatchAxis(),
ChannelAxis(channel_names=["channel1", "channel2"]),
SpaceInputAxis(id="x", size=SizeReference(tensor_id="input2", axis_id="x")),
SpaceInputAxis(id="y", size=10),
],
description="",
test_tensor=FileDescr(source=Path(test_tensor1_file.name)),
)

model_descr, expected_output = _bioimage_model([input1, input2, input3])
model_bytes = io.BytesIO()
save_bioimageio_package_to_stream(model_descr, output_stream=model_bytes)
return model_bytes, expected_output


def _bioimage_model(inputs: List[InputTensorDescr]) -> Tuple[ModelDescr, xr.DataArray]:
test_tensor = np.arange(1 * 2 * 10 * 10, dtype="float32").reshape(1, 2, 10, 10)
with tempfile.NamedTemporaryFile(suffix=".npy", delete=False) as test_tensor_file:
np.save(test_tensor_file.name, test_tensor)

dummy_model = _DummyNetwork()
with tempfile.NamedTemporaryFile(suffix=".pts", delete=False) as weights_file:
torch.save(dummy_model.state_dict(), weights_file.name)

output_tensor = OutputTensorDescr(
id="output",
axes=[
BatchAxis(),
ChannelAxis(channel_names=["channel1", "channel2"]),
SpaceOutputAxis(id=AxisId("x"), size=10),
SpaceOutputAxis(id=AxisId("y"), size=10),
],
description="",
test_tensor=FileDescr(source=Path(test_tensor_file.name)),
)

mocked_input1 = create_autospec(v0_5.InputTensorDescr)
mocked_input1.id = "input1"
mocked_input1.axes = [
v0_5.BatchAxis(),
v0_5.ChannelAxis(channel_names=["channel1", "channel2"]),
v0_5.SpaceInputAxis(id=AxisId("x"), size=10),
v0_5.SpaceInputAxis(id=AxisId("y"), size=v0_5.SizeReference(tensor_id="input3", axis_id="x")),
]

mocked_input2 = create_autospec(v0_5.InputTensorDescr)
mocked_input2.id = "input2"
mocked_input2.axes = [
v0_5.BatchAxis(),
v0_5.ChannelAxis(channel_names=["channel1", "channel2"]),
v0_5.SpaceInputAxis(id=AxisId("x"), size=v0_5.ParameterizedSize(min=10, step=2)),
v0_5.SpaceInputAxis(id=AxisId("y"), size=v0_5.ParameterizedSize(min=10, step=5)),
]

mocked_input3 = create_autospec(v0_5.InputTensorDescr)
mocked_input3.id = "input3"
mocked_input3.axes = [
v0_5.BatchAxis(),
v0_5.ChannelAxis(channel_names=["channel1", "channel2"]),
v0_5.SpaceInputAxis(id="x", size=v0_5.SizeReference(tensor_id="input2", axis_id="x")),
v0_5.SpaceInputAxis(id="y", size=10),
]

mocked_prediction_pipeline, mocked_output_sample = _bioimage_model([mocked_input1, mocked_input2, mocked_input3])
with patched_prediction_pipeline(mocked_prediction_pipeline):
yield mocked_prediction_pipeline, mocked_output_sample


def _bioimage_model(inputs: List[v0_5.InputTensorDescr]) -> Tuple[PredictionPipeline, Sample]:
mocked_descr = create_autospec(v0_5.ModelDescr)

mocked_output = create_autospec(v0_5.OutputTensorDescr)
mocked_output.id = "output"
mocked_output.axes = [
v0_5.BatchAxis(),
v0_5.ChannelAxis(channel_names=["channel1", "channel2"]),
v0_5.SpaceInputAxis(id=AxisId("x"), size=20),
v0_5.SpaceInputAxis(id=AxisId("y"), size=20),
]
mocked_descr.inputs = inputs
mocked_descr.outputs = [mocked_output]

mocked_output_sample = Sample(
members={
TensorId("output"): Tensor.from_xarray(
xr.DataArray(np.arange(2 * 20 * 20).reshape((1, 2, 20, 20)), dims=["batch", "channel", "x", "y"])
mocked_descr = ModelDescr(
name="mocked model",
description="A test model for demonstration purposes only",
authors=[Author(name="me", affiliation="my institute", github_user="bioimageiobot")],
# change github_user to your GitHub account name
cite=[CiteEntry(text="for model training see my paper", doi=Doi("10.1234something"))],
license=LicenseId("MIT"),
documentation=HttpUrl("https://raw.githubusercontent.com/bioimage-io/spec-bioimage-io/main/README.md"),
git_repo=HttpUrl("https://github.com/bioimage-io/spec-bioimage-io"),
inputs=inputs,
outputs=[output_tensor],
weights=WeightsDescr(
pytorch_state_dict=PytorchStateDictWeightsDescr(
source=weights_file.name,
architecture=ArchitectureFromLibraryDescr(
import_from="tests.conftest", callable=_DummyNetwork.__name__
),
pytorch_version=Version("1.1.1"),
)
},
id=None,
stat={},
),
)
return mocked_descr, _dummy_network_output


_dummy_network_output = xr.DataArray(np.arange(2 * 10 * 10).reshape(1, 2, 10, 10), dims=["batch", "channel", "x", "y"])


mocked_prediction_pipeline = create_autospec(PredictionPipeline)
mocked_prediction_pipeline.model_description = mocked_descr
mocked_prediction_pipeline.predict_sample_without_blocking.return_value = mocked_output_sample
return mocked_prediction_pipeline, mocked_output_sample
class _DummyNetwork(nn.Module):
def forward(self, *args):
return _dummy_network_output
