Commit

issue #51: Update doc
issue #51: Modify documentation to reflect change on Model namedtuple
MaxenceGui committed Apr 8, 2024
1 parent cac2883 commit dd6e6c2
Showing 4 changed files with 38 additions and 33 deletions.
32 changes: 20 additions & 12 deletions app.py
@@ -45,7 +45,7 @@
Model = namedtuple(
'Model',
[
'entry_function',
'request_function',
'name',
'endpoint',
'api_key',
@@ -178,29 +178,26 @@ async def inference_request():

header, encoded_data = image_base64.split(",", 1)

# Validate image header
# Validate image header #TODO with magic header
if not header.startswith("data:image/"):
raise InferenceRequestError("invalid image header")

# Keep track of every output given by the models
# TODO: add it to CACHE variable
cache_json_result = [encoded_data]
image_bytes = base64.b64decode(encoded_data)

container_client = await azure_storage_api.mount_container(
blob_service_client, container_name, create_container=True
)
hash_value = await azure_storage_api.generate_hash(image_bytes)
blob_name = await azure_storage_api.upload_image(
await azure_storage_api.upload_image(
container_client, folder_name, image_bytes, hash_value
)
blob = await azure_storage_api.get_blob(container_client, blob_name)
image_bytes = base64.b64encode(blob).decode("utf8")

# Keep track of every output given by the models
# TODO: add it to CACHE variable
cache_json_result = [image_bytes]

for idx, model in enumerate(pipelines_endpoints.get(pipeline_name)):
print(f"Entering {model.name.upper()} model") # TODO: Transform into logging
result_json = await model.entry_function(model, cache_json_result[idx])
result_json = await model.request_function(model, cache_json_result[idx])
cache_json_result.append(result_json)

print("End of inference request") # TODO: Transform into logging
@@ -232,6 +229,10 @@ async def inference_request():
print(error)
return jsonify(["Unexpected error occured"]), 500

@app.get("/coffee")
async def get_coffee():
return jsonify("Tea is great!"), 418


@app.get("/seed-data/<seed_name>")
async def get_seed_data(seed_name):
@@ -264,7 +265,7 @@ async def get_model_endpoints_metadata():
if CACHE['endpoints']:
return jsonify(CACHE['endpoints']), 200
else:
return jsonify("Error retrieving model endpoints metadata.", 400)
return jsonify("Error retrieving model endpoints metadata.", 404)


@app.get("/health")
@@ -329,7 +330,6 @@ async def get_pipelines():
print(error)
raise ServerError("server errror: could not retrieve the pipelines") from error


models = ()
for model in result_json.get("models"):
m = Model(
@@ -370,6 +370,14 @@ async def before_serving():
CACHE["seeds"] = await fetch_json(NACHET_DATA, "seeds", "seeds/all.json")
CACHE["endpoints"] = await get_pipelines()

print(
f"""Server start with current configuration:\n
date: {date.today()}
file version of pipelines: {PIPELINE_VERSION}
pipelines: {[pipeline for pipeline in CACHE["pipelines"].keys()]}\n
"""
) #TODO Transform into logging

except ServerError as e:
print(e)
raise
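For reference, here is a sketch of the renamed `Model` namedtuple after this commit. The field list and order are assembled from the hunk above and from the positional arguments passed to `Model(...)` in the `get_pipelines` code shown in the documentation diff below; treat it as illustrative rather than verbatim source.

```python
from collections import namedtuple

# Sketch: field order mirrors the positional arguments passed to Model(...)
# in get_pipelines (see docs/nachet-inference-documentation.md below).
Model = namedtuple(
    'Model',
    [
        'request_function',     # renamed from 'entry_function' in this commit
        'name',
        'endpoint',             # decrypted model endpoint URL
        'api_key',              # decrypted API key
        'inference_function',   # process_* function that reshapes model output
        'content_type',
        'deployment_platform',
    ]
)
```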
2 changes: 1 addition & 1 deletion azure_storage/azure_storage_api.py
@@ -99,7 +99,7 @@ async def get_blob(container_client: ContainerClient, blob_name: str):
f"the specified blob: {blob_name} cannot be found") from error


async def upload_image(container_client, folder_name, image, hash_value):
async def upload_image(container_client: ContainerClient, folder_name, image, hash_value):
"""
uploads the image to the specified folder within the user's container; if
the specified folder doesn't exist, it creates it with a uuid
33 changes: 15 additions & 18 deletions docs/nachet-inference-documentation.md
@@ -242,50 +242,47 @@ the `CACHE["endpoint"]` variable. This is the variable that feeds the frontend the
information and metadata.

```python
async def get_pipeline(mock:bool = False):
async def get_pipelines():
"""
Retrieves the pipelines from the Azure storage API.
Parameters:
- mock (bool): If True, retrieves the pipelines from a mock JSON file. If False, retrieves the pipelines from the Azure storage API.
Returns:
- list: A list of dictionaries representing the pipelines.
"""
if mock:
with open("mock_pipeline_json.json", "r+") as f:
result_json = json.load(f)
else:
result_json = await azure_storage_api.get_pipeline_info(connection_string, PIPELINE_BLOB_NAME, PIPELINE_VERSION)
try:
app.config["BLOB_CLIENT"] = await azure_storage_api.get_blob_client(connection_string)
result_json = await azure_storage_api.get_pipeline_info(app.config["BLOB_CLIENT"], PIPELINE_BLOB_NAME, PIPELINE_VERSION)
cipher_suite = Fernet(FERNET_KEY)
# Get all the api_call function and map them in a dictionary
api_call_function = {func.split("from_")[1]: getattr(model_module, func) for func in dir(model_module) if "inference" in func.split("_")}
# Get all the inference functions and map them in a dictionary
inference_functions = {func: getattr(inference, func) for func in dir(inference) if "process" in func.split("_")}
except (ConnectionStringError, PipelineNotFoundError) as error:
print(error)
raise ServerError("server errror: could not retrieve the pipelines") from error

models = ()
for model in result_json.get("models"):
m = Model(
api_call_function.get(model.get("api_call_function")),
request_function.get(model.get("api_call_function")),
model.get("model_name"),
# To protect sensitive data (API key and model endpoint), we encrypt it when
# it's pushed into the blob storage. Once we retrieve the data here in the
# backend, we need to decrypt the byte format to recover the original
# data.
cipher_suite.decrypt(model.get("endpoint").encode()).decode(),
cipher_suite.decrypt(model.get("api_key").encode()).decode(),
inference_functions.get(model.get("inference_function")),
model.get("content-type"),
model.get("content_type"),
model.get("deployment_platform")
)
models += (m,)

# Build the pipeline to call the models in order in the inference request
for pipeline in result_json.get("pipelines"):
CACHE["pipelines"][pipeline.get("pipeline_name")] = tuple([m for m in models if m.name in pipeline.get("models")])

return result_json.get("pipelines")

```
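The comment in the block above notes that the endpoint and API key are encrypted before being pushed to blob storage. As a rough sketch of both sides of that round trip (assuming the `cryptography` package and a shared Fernet key, generated inline here as a stand-in for the real `FERNET_KEY`):

```python
from cryptography.fernet import Fernet

FERNET_KEY = Fernet.generate_key()  # stand-in for the real shared secret
cipher_suite = Fernet(FERNET_KEY)

# Encrypt side: what the pipeline-file builder would do before upload.
encrypted_endpoint = cipher_suite.encrypt(b"https://example.endpoint").decode()

# Decrypt side: what get_pipelines does above after retrieval.
endpoint = cipher_suite.decrypt(encrypted_endpoint.encode()).decode()
```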

### Available Versions of the JSON file

|Version|Creation Date| Pipelines|
|--|--|--|
|0.1.3 | 2024-03-26 | Swin Transformer and 6 Seeds Detector|
|0.1.0 | 2024-02-26 | Swin Transformer and 6 Seeds Detector|
|0.1.1 | 2024-03-14 | Swin Transformer and 6 Seeds Detector|
4 changes: 2 additions & 2 deletions docs/nachet-model-documentation.md
@@ -29,9 +29,9 @@ Nachet Interactive models perform the following tasks:
|Seed-detector | seed-detector-1 | Object Detection | seed_detector | process_image_slicing | Yes | - |
|Swin | swinv1-base-dataaugv2-1 | Classification | swin | process_swin_result | Yes | - |

### Inference and API Call Function
### Request Inference Function

The inference and API call functions act as entry and exit points for the model. The API call explicitly requests a prediction from the specified model (such as Swin, Nachet-6seeds, etc.). The inference function processes the data before sending it to the frontend if the model requires it. For instance, the Seed-detector only returns "seed" as a label, and its inference needs to be processed and passed to the next model which assigns the correct label to the seeds.
The request inference functions request a prediction from the specified model (such as Swin, Nachet-6seeds, etc.). If needed, the function will process the data to be readable by the next model in the pipeline. For instance, the Seed-detector only returns "seed" as a label, and its inference needs to be processed and passed to the next model which assigns the correct label to the seeds.
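As a concrete illustration, here is a minimal sketch of what a request inference function could look like. It is not the repository's implementation: the `aiohttp` call shape and the bearer-token header are assumptions; only the `request_inference_from_<model>` naming pattern and the `(model, previous_result)` call signature come from the code shown above.

```python
import aiohttp

async def request_inference_from_swin(model, previous_result):
    """Sketch only. `model` is the Model namedtuple; `previous_result` is the
    previous entry in cache_json_result (base64 image or prior model output)."""
    headers = {
        "Content-Type": model.content_type,
        "Authorization": f"Bearer {model.api_key}",  # assumed auth scheme
    }
    async with aiohttp.ClientSession() as session:
        async with session.post(model.endpoint, headers=headers,
                                data=previous_result) as response:
            result_json = await response.json()
    # Reshape the raw output for the next model in the pipeline when needed,
    # e.g. the way process_image_slicing handles the Seed-detector's boxes.
    return model.inference_function(result_json)
```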

## Return value of models

