Remove interface model info #215

Merged 8 commits on Aug 16, 2024
11 changes: 2 additions & 9 deletions proto/inference.proto
@@ -86,14 +86,6 @@ message OutputShape {

message ModelSession {
string id = 1;
string name = 2;
repeated string inputAxes = 3;
repeated string outputAxes = 4;
bool hasTraining = 5;
repeated InputShape inputShapes = 6;
repeated OutputShape outputShapes = 7;
repeated string inputNames = 8;
repeated string outputNames = 9;
}

message LogEntry {
@@ -128,7 +120,8 @@ message NamedFloat {
message Tensor {
bytes buffer = 1;
string dtype = 2;
repeated NamedInt shape = 3;
string tensorId = 3;
repeated NamedInt shape = 4;
}

message PredictRequest {
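For orientation, a minimal sketch of how a client might fill the new tensorId field when building a Tensor message, assuming the generated stubs are importable as tiktorch.proto.inference_pb2 (everything except the proto field names is illustrative):

import numpy as np
from tiktorch.proto import inference_pb2

arr = np.zeros((1, 1, 128, 128), dtype=np.float32)
tensor = inference_pb2.Tensor(
    dtype="float32",
    tensorId="input",  # new field: identifies the tensor now that ModelSession no longer carries axis/name metadata
    buffer=bytes(arr),
    shape=[inference_pb2.NamedInt(name=n, size=s) for n, s in zip("bcyx", arr.shape)],
)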
26 changes: 22 additions & 4 deletions tests/conftest.py
@@ -18,7 +18,9 @@
TEST_DATA = "data"
TEST_BIOIMAGEIO_ZIPFOLDER = "unet2d"
TEST_BIOIMAGEIO_ONNX = "unet2d_onnx"
TEST_BIOIMAGEIO_DUMMY = "dummy"
TEST_BIOIMAGEIO_DUMMY_EXPLICIT = "dummy"
TEST_BIOIMAGEIO_DUMMY_EXPLICIT_RDF = f"{TEST_BIOIMAGEIO_DUMMY_EXPLICIT}/Dummy.model.yaml"
TEST_BIOIMAGEIO_DUMMY_PARAM_RDF = "dummy_param/Dummy.model_param.yaml"
TEST_BIOIMAGEIO_TENSORFLOW_DUMMY = "dummy_tensorflow"
TEST_BIOIMAGEIO_TORCHSCRIPT = "unet2d_torchscript"

@@ -98,7 +100,7 @@ def bioimageio_model_zipfile(bioimageio_model_bytes):

@pytest.fixture
def bioimageio_dummy_model_filepath(data_path, tmpdir):
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_DUMMY
bioimageio_net_dir = Path(data_path) / TEST_BIOIMAGEIO_DUMMY_EXPLICIT
path = tmpdir / "dummy_model.zip"

with ZipFile(path, mode="w") as zip_model:
@@ -113,8 +115,24 @@ def bioimageio_dummy_model_filepath(data_path, tmpdir):


@pytest.fixture
def bioimageio_dummy_model_bytes(data_path):
rdf_source = data_path / TEST_BIOIMAGEIO_DUMMY / "Dummy.model.yaml"
def bioimageio_dummy_explicit_model_bytes(data_path):
rdf_source = data_path / TEST_BIOIMAGEIO_DUMMY_EXPLICIT_RDF
return _bioimageio_package(rdf_source)


@pytest.fixture
def bioimageio_dummy_param_model_bytes(data_path):
rdf_source = data_path / TEST_BIOIMAGEIO_DUMMY_PARAM_RDF
return _bioimageio_package(rdf_source)


@pytest.fixture(params=[(TEST_BIOIMAGEIO_DUMMY_PARAM_RDF, "param"), (TEST_BIOIMAGEIO_DUMMY_EXPLICIT_RDF, "input")])
def bioimageio_dummy_model(request, data_path):
path, tensor_id = request.param
yield _bioimageio_package(data_path / path), tensor_id


def _bioimageio_package(rdf_source):
data = io.BytesIO()
export_resource_package(rdf_source, output_path=data)
return data
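A hedged example of how a test could consume the new parametrized fixture; the test name is illustrative and not part of this PR:

def test_dummy_model_package(bioimageio_dummy_model):
    # the fixture yields one (packaged model bytes, expected input tensor id) pair per RDF
    model_bytes, tensor_id = bioimageio_dummy_model
    assert tensor_id in ("param", "input")
    assert len(model_bytes.getvalue()) > 0  # _bioimageio_package returns a populated io.BytesIO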
5 changes: 3 additions & 2 deletions tests/data/dummy/Dummy.model.yaml
@@ -41,15 +41,16 @@ inputs:
data_range: [-inf, inf]
shape: [1, 1, 128, 128]


outputs:
- name: output
axes: bcyx
data_type: float32
data_range: [0, 1]
shape:
reference_tensor: input # FIXME(m-novikov) ignoring for now
reference_tensor: input
scale: [1, 1, 1, 1]
offset: [0, 0, 0, 0]
halo: [0, 0, 32, 32] # Should be moved to outputs
halo: [0, 0, 32, 32]

type: model
57 changes: 57 additions & 0 deletions tests/data/dummy_param/Dummy.model_param.yaml
@@ -0,0 +1,57 @@
format_version: 0.3.3
language: python
framework: pytorch

name: UNet2DNucleiBroad
description: A 2d U-Net pretrained on broad nucleus dataset.
cite:
- text: "Ronneberger, Olaf et al. U-net: Convolutional networks for biomedical image segmentation. MICCAI 2015."
doi: https://doi.org/10.1007/978-3-319-24574-4_28
authors:
- name: "ilastik-team"
affiliation: "EMBL Heidelberg"

documentation: dummy.md
tags: [pytorch, nucleus-segmentation]
license: MIT
git_repo: https://github.com/ilastik/tiktorch
covers: []

source: dummy.py::Dummy
sha256: 00ffb1647cf7ec524892206dce6258d9da498fe040c62838f31b501a09bfd573
timestamp: 2019-12-11T12:22:32Z # ISO 8601

test_inputs: [dummy_in.npy]
test_outputs: [dummy_out.npy]

weights:
pytorch_state_dict:
source: ./weights
sha256: 518cb80bad2eb3ec3dfbe6bab74920951391ce8fb24e15cf59b9b9f052a575a6
authors:
- name: "ilastik-team"
affiliation: "EMBL Heidelberg"


# TODO double check inputs/outputs
inputs:
- name: param
axes: bcyx
data_type: float32
data_range: [-inf, inf]
shape:
min: [1, 1, 64, 64]
step: [0, 0, 2, 1]

outputs:
- name: output
axes: bcyx
data_type: float32
data_range: [0, 1]
shape:
reference_tensor: param
scale: [1, 1, 1, 1]
offset: [0, 0, 0, 0]
halo: [0, 0, 8, 8]

type: model
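As a quick illustration of the parametrized shape above, a sketch of the bioimage.io 0.3.x convention as I understand it: valid input shapes are min + n * step, and the output shape follows the reference tensor via scale and offset.

min_shape = [1, 1, 64, 64]
step = [0, 0, 2, 1]
scale = [1, 1, 1, 1]
offset = [0, 0, 0, 0]

def valid_input_shape(n):
    # n-th valid shape for the parametrized input named "param"
    return [m + n * s for m, s in zip(min_shape, step)]

def output_shape(input_shape):
    # output shape derived from the reference tensor: scale * shape + 2 * offset
    return [int(sc * dim + 2 * off) for sc, dim, off in zip(scale, input_shape, offset)]

# e.g. n=2: input [1, 1, 68, 66] -> output [1, 1, 68, 66]; the halo [0, 0, 8, 8] is cropped from the output edges
print(valid_input_shape(2), output_shape(valid_input_shape(2)))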
Empty file added tests/data/dummy_param/dummy.md
Empty file.
7 changes: 7 additions & 0 deletions tests/data/dummy_param/dummy.py
@@ -0,0 +1,7 @@
from torch import nn


class Dummy(nn.Module):
def forward(self, input):
x = input
return x + 1
Binary file added tests/data/dummy_param/dummy_in.npy
Binary file not shown.
Binary file added tests/data/dummy_param/dummy_out.npy
Binary file not shown.
Empty file.
Binary file added tests/data/dummy_param/weights
Binary file not shown.
117 changes: 85 additions & 32 deletions tests/test_converters.py
@@ -7,6 +7,7 @@
NamedExplicitOutputShape,
NamedImplicitOutputShape,
NamedParametrizedShape,
Sample,
input_shape_to_pb_input_shape,
numpy_to_pb_tensor,
output_shape_to_pb_output_shape,
@@ -27,6 +28,16 @@ def _numpy_to_pb_tensor(arr):
return parsed


def to_pb_tensor(tensor_id: str, arr: xr.DataArray):
"""
Round-trip the tensor through protobuf serialization/deserialization.
"""
tensor = xarray_to_pb_tensor(tensor_id, arr)
parsed = inference_pb2.Tensor()
parsed.ParseFromString(tensor.SerializeToString())
return parsed


class TestNumpyToPBTensor:
def test_should_serialize_to_tensor_type(self):
arr = np.arange(9)
@@ -97,18 +108,9 @@ def test_should_same_data(self, shape):


class TestXarrayToPBTensor:
def to_pb_tensor(self, arr):
"""
Makes sure that tensor was serialized/deserialized
"""
tensor = xarray_to_pb_tensor(arr)
parsed = inference_pb2.Tensor()
parsed.ParseFromString(tensor.SerializeToString())
return parsed

def test_should_serialize_to_tensor_type(self):
xarr = xr.DataArray(np.arange(8).reshape((2, 4)), dims=("x", "y"))
pb_tensor = self.to_pb_tensor(xarr)
pb_tensor = to_pb_tensor("input0", xarr)
assert isinstance(pb_tensor, inference_pb2.Tensor)
assert len(pb_tensor.shape) == 2
dim1 = pb_tensor.shape[0]
@@ -123,28 +125,19 @@ def test_should_serialize_to_tensor_type(self):
@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_have_shape(self, shape):
arr = xr.DataArray(np.zeros(shape))
tensor = self.to_pb_tensor(arr)
tensor = to_pb_tensor("input0", arr)
assert tensor.shape
assert list(shape) == [dim.size for dim in tensor.shape]

def test_should_have_serialized_bytes(self):
arr = xr.DataArray(np.arange(9, dtype=np.uint8))
expected = bytes(arr.data)
tensor = self.to_pb_tensor(arr)
tensor = to_pb_tensor("input0", arr)

assert expected == tensor.buffer


class TestPBTensorToXarray:
def to_pb_tensor(self, arr):
"""
Makes sure that tensor was serialized/deserialized
"""
tensor = xarray_to_pb_tensor(arr)
parsed = inference_pb2.Tensor()
parsed.ParseFromString(tensor.SerializeToString())
return parsed

def test_should_raise_on_empty_dtype(self):
tensor = inference_pb2.Tensor(dtype="", shape=[inference_pb2.NamedInt(size=1), inference_pb2.NamedInt(size=2)])
with pytest.raises(ValueError):
@@ -155,33 +148,32 @@ def test_should_raise_on_empty_shape(self):
with pytest.raises(ValueError):
pb_tensor_to_xarray(tensor)

def test_should_return_ndarray(self):
def test_should_return_xarray(self):
arr = xr.DataArray(np.arange(9))
parsed = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(parsed)

assert isinstance(result_arr, xr.DataArray)
parsed = to_pb_tensor("input0", arr)
result_tensor = pb_tensor_to_xarray(parsed)
assert isinstance(result_tensor, xr.DataArray)

@pytest.mark.parametrize("np_dtype,dtype_str", [(np.int64, "int64"), (np.uint8, "uint8"), (np.float32, "float32")])
def test_should_have_same_dtype(self, np_dtype, dtype_str):
arr = xr.DataArray(np.arange(9, dtype=np_dtype))
tensor = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(tensor)
pb_tensor = to_pb_tensor("input0", arr)
result_arr = pb_tensor_to_xarray(pb_tensor)

assert arr.dtype == result_arr.dtype

@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_same_shape(self, shape):
arr = xr.DataArray(np.zeros(shape))
tensor = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(tensor)
pb_tensor = to_pb_tensor("input0", arr)
result_arr = pb_tensor_to_xarray(pb_tensor)
assert arr.shape == result_arr.shape

@pytest.mark.parametrize("shape", [(3, 3), (1,), (1, 1), (18, 20, 1)])
def test_should_same_data(self, shape):
arr = xr.DataArray(np.random.random(shape))
tensor = self.to_pb_tensor(arr)
result_arr = pb_tensor_to_xarray(tensor)
pb_tensor = to_pb_tensor("input0", arr)
result_arr = pb_tensor_to_xarray(pb_tensor)
assert_array_equal(arr, result_arr)


@@ -276,3 +268,64 @@ def test_parametrized_input_shape(self, min_shape, axes, step):
assert [(d.name, d.size) for d in pb_shape.stepShape.namedInts] == [
(name, size) for name, size in zip(axes, step)
]


class TestSample:
def test_create_sample_from_pb_tensors(self):
arr_1 = np.arange(32 * 32, dtype=np.int64).reshape(32, 32)
tensor_1 = inference_pb2.Tensor(
dtype="int64",
tensorId="input1",
buffer=bytes(arr_1),
shape=[inference_pb2.NamedInt(name="x", size=32), inference_pb2.NamedInt(name="y", size=32)],
)

arr_2 = np.arange(64 * 64, dtype=np.int64).reshape(64, 64)
tensor_2 = inference_pb2.Tensor(
dtype="int64",
tensorId="input2",
buffer=bytes(arr_2),
shape=[inference_pb2.NamedInt(name="x", size=64), inference_pb2.NamedInt(name="y", size=64)],
)

sample = Sample.from_pb_tensors([tensor_1, tensor_2])
assert len(sample.tensors) == 2
assert sample.tensors["input1"].equals(xr.DataArray(arr_1, dims=["x", "y"]))
assert sample.tensors["input2"].equals(xr.DataArray(arr_2, dims=["x", "y"]))

def test_create_sample_from_raw_data(self):
arr_1 = np.arange(32 * 32, dtype=np.int64).reshape(32, 32)
tensor_1 = xr.DataArray(arr_1, dims=["x", "y"])
arr_2 = np.arange(64 * 64, dtype=np.int64).reshape(64, 64)
tensor_2 = xr.DataArray(arr_2, dims=["x", "y"])
tensors_ids = ["input1", "input2"]
actual_sample = Sample.from_xr_tensors(tensors_ids, [tensor_1, tensor_2])

expected_dict = {tensors_ids[0]: tensor_1, tensors_ids[1]: tensor_2}
expected_sample = Sample(expected_dict)
assert actual_sample == expected_sample

def test_sample_to_pb_tensors(self):
arr_1 = np.arange(32 * 32, dtype=np.int64).reshape(32, 32)
tensor_1 = xr.DataArray(arr_1, dims=["x", "y"])
arr_2 = np.arange(64 * 64, dtype=np.int64).reshape(64, 64)
tensor_2 = xr.DataArray(arr_2, dims=["x", "y"])
tensors_ids = ["input1", "input2"]
sample = Sample.from_xr_tensors(tensors_ids, [tensor_1, tensor_2])

pb_tensor_1 = inference_pb2.Tensor(
dtype="int64",
tensorId="input1",
buffer=bytes(arr_1),
shape=[inference_pb2.NamedInt(name="x", size=32), inference_pb2.NamedInt(name="y", size=32)],
)
pb_tensor_2 = inference_pb2.Tensor(
dtype="int64",
tensorId="input2",
buffer=bytes(arr_2),
shape=[inference_pb2.NamedInt(name="x", size=64), inference_pb2.NamedInt(name="y", size=64)],
)
expected_tensors = [pb_tensor_1, pb_tensor_2]

actual_tensors = sample.to_pb_tensors()
assert expected_tensors == actual_tensors
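For context, a minimal sketch of the Sample container these tests exercise, inferred only from its usage here; the real class in the converters module may differ:

from dataclasses import dataclass
from typing import Dict, List

import xarray as xr

# assumed import paths; these helpers are exercised earlier in this test module
from tiktorch.converters import pb_tensor_to_xarray, xarray_to_pb_tensor
from tiktorch.proto import inference_pb2


@dataclass
class Sample:
    tensors: Dict[str, xr.DataArray]

    @classmethod
    def from_pb_tensors(cls, pb_tensors: List[inference_pb2.Tensor]) -> "Sample":
        # key each converted tensor by the new Tensor.tensorId field
        return cls({t.tensorId: pb_tensor_to_xarray(t) for t in pb_tensors})

    @classmethod
    def from_xr_tensors(cls, tensor_ids: List[str], tensors: List[xr.DataArray]) -> "Sample":
        return cls(dict(zip(tensor_ids, tensors)))

    def to_pb_tensors(self) -> List[inference_pb2.Tensor]:
        return [xarray_to_pb_tensor(tensor_id, arr) for tensor_id, arr in self.tensors.items()]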
10 changes: 5 additions & 5 deletions tests/test_rpc/test_mp.py
@@ -8,7 +8,7 @@

from tiktorch import log
from tiktorch.rpc import RPCFuture, RPCInterface, Shutdown, exposed
from tiktorch.rpc.mp import FutureStore, MPServer, create_client
from tiktorch.rpc.mp import FutureStore, MPServer, create_client_api


class ITestApi(RPCInterface):
@@ -64,7 +64,7 @@ def client(log_queue):
p = mp.Process(target=_srv, args=(parent, log_queue))
p.start()

client = create_client(ITestApi, child, timeout=10)
client = create_client_api(iface_cls=ITestApi, conn=child, timeout=10)

yield client

@@ -108,7 +108,7 @@ def __getattr__(self, name):
p = mp.Process(target=_srv, args=(parent, log_queue))
p.start()

client = create_client(ITestApi, SlowConn(child))
client = create_client_api(iface_cls=ITestApi, conn=SlowConn(child))

client.fast_compute(2, 2)

@@ -121,7 +121,7 @@ def test_future_timeout(client: ITestApi, log_queue):
p = mp.Process(target=_srv, args=(parent, log_queue))
p.start()

client = create_client(ITestApi, child, timeout=0.001)
client = create_client_api(iface_cls=ITestApi, conn=child, timeout=0.001)

with pytest.raises(TimeoutError):
client.compute(1, 2)
@@ -256,7 +256,7 @@ def _spawn(iface_cls, srv_cls):
p = mp.Process(target=_run_srv, args=(srv_cls, parent, log_queue))
p.start()

data["client"] = client = create_client(iface_cls, child)
data["client"] = client = create_client_api(iface_cls=iface_cls, conn=child)
data["process"] = p
return client
