
Commit

Merge pull request #7 from bioimage-io/web-ui
Web UI
oeway authored Aug 26, 2024
2 parents ce7bdcf + f841df7 commit 2eeaa76
Showing 10 changed files with 918 additions and 540 deletions.
3 changes: 0 additions & 3 deletions .github/workflows/docker-publish.yml
@@ -4,9 +4,6 @@ on:
   push:
     branches:
       - main # Trigger the workflow on pushes to the main branch
-  pull_request:
-    branches:
-      - main # Trigger the workflow on pull requests to the main branch
 
 jobs:
   build-and-push:
3 changes: 3 additions & 0 deletions Dockerfile
@@ -29,6 +29,9 @@ COPY ./bioimageio_colab/register_sam_service.py /app/register_sam_service.py
 # Change ownership of the application directory to the non-root user
 RUN chown -R bioimageio_colab:bioimageio_colab /app/
 
+# Add a build argument for cache invalidation
+ARG CACHEBUST=1
+
 # Fetch the Hypha server version and reinstall or upgrade hypha-rpc to the matching version
 RUN HYPHA_VERSION=$(curl -s https://hypha.aicell.io/assets/config.json | jq -r '.hypha_version') && \
     pip install --upgrade "hypha-rpc<=$HYPHA_VERSION"
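Docker caches each RUN layer, so the hypha-rpc upgrade step above could keep reusing a stale cached layer even after the Hypha server starts reporting a newer version. Declaring ARG CACHEBUST and passing a fresh value at build time changes the cache key of the steps that follow it, forcing the version fetch to re-run. A minimal sketch of forcing a one-off rebuild by hand (the image tag is illustrative):

docker build --build-arg CACHEBUST=$(date +%s) -t bioimageio-colab:dev .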
5 changes: 4 additions & 1 deletion build_and_push.sh
@@ -22,7 +22,10 @@ IMAGE_NAME=ghcr.io/${GITHUB_REPOSITORY}:latest
 # Log in to GHCR
 echo "$GHCR_PAT" | docker login ghcr.io -u "$GITHUB_ACTOR" --password-stdin
 
-# Build the Docker image using Docker Compose
+# Generate a dynamic CACHEBUST value (timestamp)
+export CACHEBUST=$(date +%Y%m%d%H%M%S)
+
+# Build the Docker image using Docker Compose with the CACHEBUST argument
 docker-compose build
 
 # Push the Docker image to GHCR
2 changes: 1 addition & 1 deletion docker-compose.yml
@@ -6,7 +6,7 @@ services:
       context: .
       dockerfile: Dockerfile
       args:
-        SOURCE_LABEL: "https://github.com/bioimage-io/bioimageio-colab"
+        CACHEBUST: ${CACHEBUST}
     image: ghcr.io/bioimage-io/bioimageio-colab:latest
     env_file:
       - .env
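Docker Compose substitutes ${CACHEBUST} from the calling shell's environment, which is how the timestamp exported in build_and_push.sh reaches the Dockerfile's ARG. The same effect can be had inline, sketched here rather than taken from the repository's scripts:

CACHEBUST=$(date +%s) docker-compose build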
102 changes: 102 additions & 0 deletions docs/data-providing-service.py
@@ -0,0 +1,102 @@
+import os
+import json
+from typing import Tuple
+import time
+from functools import partial
+
+import numpy as np
+from hypha_rpc import connect_to_server
+from kaibu_utils import features_to_mask
+from tifffile import imread, imwrite
+
+
+def list_image_files(image_folder: str, supported_file_types: Tuple[str, ...]):
+    return [f for f in os.listdir(image_folder) if f.endswith(supported_file_types)]
+
+
+def read_image(file_path: str):
+    image = imread(file_path)
+    if len(image.shape) == 3 and image.shape[0] == 3:
+        image = np.transpose(image, [1, 2, 0])
+    return image
+
+
+def get_random_image(image_folder: str, supported_file_types: Tuple[str, ...]):
+    filenames = list_image_files(image_folder, supported_file_types)
+    r = np.random.randint(len(filenames))
+    file_name = filenames[r]
+    image = read_image(os.path.join(image_folder, file_name))
+    return (image, file_name.split(".")[0])
+
+
+def save_annotation(annotations_folder: str, image_name: str, features, image_shape):
+    mask = features_to_mask(features, image_shape)
+    n_image_masks = len(
+        [f for f in os.listdir(annotations_folder) if f.startswith(image_name)]
+    )
+    mask_name = os.path.join(
+        annotations_folder, f"{image_name}_mask_{n_image_masks + 1}.tif"
+    )
+    imwrite(mask_name, mask)
+
+
+def upload_image_to_s3():
+    """
+    Steps:
+    - Create a user prefix on S3
+    - Create a data and annotation prefix
+    - For every image:
+        - Load the image from the data folder into a numpy array
+        - Upload the image to the data prefix
+    Return:
+    - The user prefix
+    # TODO: register a data providing service on K8S cluster that uses the user prefix (get_random_image_s3, save_annotation_s3)
+    """
+    raise NotImplementedError
+
+async def register_service(
+    server_url: str,
+    token: str,
+    supported_file_types_json: str,
+    name: str,
+    description: str,
+):
+    # Define paths to images and annotations
+    images_path = "/mnt"
+    annotations_path = "/mnt/annotations"
+
+    # Check if the images folder exists
+    if not os.path.isdir(images_path):
+        raise FileNotFoundError("Mounted images folder not found")
+
+    # Decode the JSON string to a Python tuple
+    supported_file_types = tuple(json.loads(supported_file_types_json))
+
+    # Connect to the Hypha server
+    server = await connect_to_server({"server_url": server_url, "token": token})
+
+    # Register the service
+    svc = await server.register_service(
+        {
+            "name": name,
+            "description": description,
+            "id": "data-provider-" + str(int(time.time()*100)),
+            "type": "annotation-data-provider",
+            "config": {
+                "visibility": "public",  # TODO: make protected
+                "run_in_executor": True,
+            },
+            # Exposed functions:
+            # get a random image from the dataset
+            # returns the image as a numpy array
+            "get_random_image": partial(
+                get_random_image, images_path, supported_file_types
+            ),
+            # save the annotation mask
+            # pass the image name, the annotation features and the image shape
+            "save_annotation": partial(save_annotation, annotations_path),
+        }
+    )
+    print(f"Service registered with ID: {svc['id']}")
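For context, a client consumes this service by looking up the ID printed at registration and calling the exposed functions remotely. A minimal sketch, assuming the public Hypha server already referenced in the Dockerfile and a hypothetical service ID; connect_to_server and get_service are the standard hypha-rpc client calls:

import asyncio

from hypha_rpc import connect_to_server


async def main():
    server = await connect_to_server({"server_url": "https://hypha.aicell.io"})
    # Replace with the ID printed by "Service registered with ID: ..."
    svc = await server.get_service("<workspace>/data-provider-<timestamp>")
    image, image_name = await svc.get_random_image()
    print(image_name, image.shape)


asyncio.run(main())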