This repository has been archived by the owner on May 5, 2023. It is now read-only.

Commit
add different routes for:
- tracking from video or zip
- upload image file and visualize predictions
- upload image as JSON to get predictions
- upgrade werkzeug requirements
- change the way we handle zip files (recursively move files to root)
anth2o committed Jun 28, 2020
1 parent 47598d9 commit a243041
Showing 16 changed files with 459 additions and 392 deletions.
2 changes: 2 additions & 0 deletions .gitignore
@@ -155,3 +155,5 @@ images_errors.csv

# patch files
*.py.patch

tmp/
2 changes: 1 addition & 1 deletion requirements.txt
@@ -9,5 +9,5 @@ ffmpeg-python
wget
requests
flask>=1.1.1
werkzeug==0.16.1 # It fixes the error: from werkzeug import FileStorage cannot import FileStorage.
werkzeug>=1.0.1
cached_property
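
Note: werkzeug 1.0 removed the deprecated top-level imports (e.g. `from werkzeug import FileStorage`); `FileStorage` now lives in `werkzeug.datastructures`, so the imports were presumably updated accordingly and the old pin is no longer needed.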
1 change: 1 addition & 0 deletions scripts/up_serving.sh
@@ -4,5 +4,6 @@ RATIO_GPU=${RATIO_GPU:-0.45}
echo "Using GPU: $NVIDIA_VISIBLE_DEVICES"
echo "Limiting GPU to ratio: $RATIO_GPU"

cd /src/mot/serving
python3 -m mot.serving.app &
/usr/bin/tf_serving_entrypoint.sh --per_process_gpu_memory_fraction=$RATIO_GPU
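
Note: `python3 -m mot.serving.app &` runs the Flask app in the background, so the TensorFlow Serving entrypoint stays in the foreground as the container's main process.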
26 changes: 18 additions & 8 deletions src/mot/object_detection/query_server.py
@@ -1,13 +1,15 @@
import json
import logging
import os
from typing import Dict

import numpy as np
import requests

from mot.object_detection.preprocessing import preprocess_for_serving

logger = logging.getLogger(__file__)


def query_tensorflow_server(signature: Dict, url: str) -> Dict:
"""Will send a REST query to the tensorflow server.
@@ -61,12 +63,16 @@ def localizer_tensorflow_serving_inference(
    Return:
        - *predictions*: A dict with the predictions with the following format:
        ```python
        if return_all_scores:
            predictions = {
                'output/boxes:0': [
                    [0.1, 0.1, 0.9, 0.9],
                    [0.0, 0.2, 0.1, 0.4],
                    [0.2, 0.4, 0.5, 0.7],
                ],  # (y1, x1, y2, x2) scaled between 0 and 1
                'output/labels:0': [3, 1, 2],  # the labels start at 1 since 0 is for background
                'output/scores:0': [
                    [0.001, 0.001, 0.98],
@@ -76,7 +82,11 @@ }
            }
        else:
            predictions = {
                'output/boxes:0': [
                    [0.1, 0.1, 0.9, 0.9],
                    [0.0, 0.2, 0.1, 0.4],
                    [0.2, 0.4, 0.5, 0.7],
                ],  # (y1, x1, y2, x2) scaled between 0 and 1
                'output/labels:0': [3, 1, 2],  # the labels start at 1 since 0 is for background
                'output/scores:0': [0.98, 0.87, 0.76]  # sorted in descending order
            }
@@ -87,10 +97,10 @@ def localizer_tensorflow_serving_inference(
    scores = np.array(predictions['output/scores:0'])
    if len(predictions["output/boxes:0"]) > 0:
        predictions['output/boxes:0'] = np.array(predictions['output/boxes:0'], np.int32) / ratio
        predictions["output/boxes:0"][:, 0] /= image.shape[0]  # scaling coords to [0, 1]
        predictions["output/boxes:0"][:, 1] /= image.shape[1]  # scaling coords to [0, 1]
        predictions["output/boxes:0"][:, 2] /= image.shape[0]  # scaling coords to [0, 1]
        predictions["output/boxes:0"][:, 3] /= image.shape[1]  # scaling coords to [0, 1]
        predictions['output/boxes:0'] = predictions['output/boxes:0'].tolist()
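        # Net effect of the scaling above (illustrative, assumed values): with
        # ratio=0.5 and image.shape=(100, 200, 3), a resized-space box
        # [10, 20, 30, 40] maps to [20, 40, 60, 80] in the original image,
        # then to [0.2, 0.2, 0.6, 0.4] once divided by height and width.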
    if return_all_scores and len(scores.shape) == 1:
        raise ValueError(
37 changes: 22 additions & 15 deletions src/mot/serving/README.md
@@ -24,31 +24,38 @@ NVIDIA_VISIBLE_DEVICES=2 RATIO_GPU=0.3 MODEL_FOLDER=/path/to/serving PORT=the_po

Here are the different ways to perform inference requests.

### Tracking

*host:port/tracking*

Available with the web interface or a simple curl. You can upload a video or a zip archive containing images.

```bash
curl -F "file=@/path/to/video.mp4" -F "fps=2" -F "resolution=(10,10)" host:port/tracking
```

You don't have to specify those parameters; you can find their default values in [this file](inference.py).

### Demo

*host:port/demo*

Available with the web interface: you can upload an image, and a localizer will make predictions on it, which will be displayed in your browser.

### Image

*host:port/image*

You can post either of the following to get the predictions of the localizer:

- an image file:

```bash
curl -F "file=@/path/to/image.jpg" host:port/image
```

- an image as a JSON in BGR:

```bash
curl -d @/path/to/json --header "Content-Type: application/json" host:port/image
```
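
The JSON body must contain an `image` field holding the pixel array (see the `/image` route in app.py below). As a minimal sketch, assuming the app is reachable at `host:port`, the request could be built in Python like this:

```python
import cv2
import requests

# cv2.imread returns the pixel array in BGR order, which is what /image expects
image = cv2.imread("/path/to/image.jpg")

# POST the array as JSON; `json=` also sets the Content-Type header
response = requests.post(
    "http://host:port/image",
    json={"image": image.tolist()},
)
print(response.json())  # e.g. {"detected_trash": [...]}
```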
54 changes: 48 additions & 6 deletions src/mot/serving/app.py
@@ -1,15 +1,57 @@
import json
import os

import cv2
import numpy as np
from flask import Flask, render_template, request

from mot.serving.constants import TMP_IMAGE_NAME, UPLOAD_FOLDER
from mot.serving.inference import (
    detect_and_track_images, predict_and_format_image, predict_image_file
)
from mot.serving.viz import draw_boxes

app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER


@app.route('/tracking', methods=['GET', 'POST'])
def tracking():
    if request.method == "GET":
        # landing page on browser
        return render_template("upload.html")
    return detect_and_track_images(request.files['file'], app.config["UPLOAD_FOLDER"])


@app.route('/demo', methods=['GET', 'POST'])
def demo():
    """Route to upload an image and visualize the prediction of a localizer."""
    if request.method == "GET":
        # landing page on browser
        return render_template("upload_image.html")

    analysis_results = predict_image_file(request.files["file"], app.config["UPLOAD_FOLDER"])
    draw_boxes(analysis_results["full_filepath"], analysis_results["detected_trash"])
    return render_template("image.html", filename=analysis_results["full_filepath"])


@app.route('/image', methods=['POST'])
def image():
    """Route to upload an image file or a JSON image in BGR and get the prediction of a localizer."""
    if "file" in request.files:
        return predict_image_file(request.files["file"], app.config["UPLOAD_FOLDER"])
    else:
        data = json.loads(request.data.decode("utf-8"))
        if "image" not in data:
            return {
                "error":
                    "Your JSON must have a field image with the image as an array in BGR"
            }
        image = np.array(data["image"])
        # write the image to disk, then run the localizer on the in-memory array
        image_path = os.path.join(app.config["UPLOAD_FOLDER"], TMP_IMAGE_NAME)
        cv2.imwrite(image_path, image)
        detected_trash = predict_and_format_image(image)
        return {"detected_trash": detected_trash}


if __name__ == "__main__":
27 changes: 27 additions & 0 deletions src/mot/serving/constants.py
@@ -0,0 +1,27 @@
import multiprocessing


# app configuration
UPLOAD_FOLDER = "static/tmp"
TMP_IMAGE_NAME = "tmp_image.jpg"
SERVING_URL = "http://localhost:8501" # the url where the tf-serving container exposes the model
CPU_COUNT = min(int(multiprocessing.cpu_count() / 2), 32)


# video settings
FPS = 4
RESOLUTION = (1024, 768)
SUM_THRESHOLD = 0.6 # the sum of scores for all classes must be greater than this value

# object detection settings
CLASS_NAMES = ["bottles", "others", "fragments"]
# for the prediction to be kept
CLASS_TO_THRESHOLD = {"bottles": 0.7, "others": 0.7, "fragments": 0.7}
DEFAULT_THRESHOLD = 0.5 # default threshold applied when the class isn't in CLASS_TO_THRESHOLD


CLASS_NAME_TO_COLOR = {
"bottles": (255, 0, 0),
"others": (0, 255, 0),
"fragments": (0, 0, 255),
}
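
To show how these settings interact, here is a hypothetical helper (not part of this commit; the actual filtering lives in inference.py) that applies a per-class threshold, falling back to the default when a class has no dedicated one:

```python
from mot.serving.constants import CLASS_NAMES, CLASS_TO_THRESHOLD, DEFAULT_THRESHOLD

def keep_detection(label: int, score: float) -> bool:
    """Return True if a detection's score clears its class threshold."""
    class_name = CLASS_NAMES[label - 1]  # labels start at 1; 0 is background
    return score >= CLASS_TO_THRESHOLD.get(class_name, DEFAULT_THRESHOLD)

print(keep_detection(1, 0.75))  # bottles: 0.75 >= 0.7 -> True
print(keep_detection(3, 0.60))  # fragments: 0.60 < 0.7 -> False
```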