Merge pull request #19 from truefoundry/np-add-service-mnist
add service deployment mnist
nikp1172 authored Feb 19, 2024
2 parents 325d938 + f0696a4 commit 6e8e7c0
Showing 11 changed files with 494 additions and 35 deletions.
336 changes: 336 additions & 0 deletions mnist-classifaction/deploy_model.ipynb

Large diffs are not rendered by default.

48 changes: 48 additions & 0 deletions mnist-classifaction/deploy_model/deploy.py
@@ -0,0 +1,48 @@
import argparse
import logging
from servicefoundry import (
    Build,
    PythonBuild,
    Service,
    Resources,
    Port,
    ArtifactsDownload,
    TruefoundryArtifactSource,
)

logging.basicConfig(level=logging.INFO)

parser = argparse.ArgumentParser()
parser.add_argument("--workspace_fqn", type=str, required=True)
parser.add_argument("--model_version_fqn", type=str, required=True)
parser.add_argument("--host", type=str, required=True)
parser.add_argument("--path", type=str, required=False)
args = parser.parse_args()

service = Service(
    name="mnist-classification-svc",
    image=Build(
        build_spec=PythonBuild(
            command="python gradio_demo.py",
            requirements_path="requirements.txt",
        )
    ),
    ports=[Port(port=8000, host=args.host, path=args.path)],
    resources=Resources(
        memory_limit=500,
        memory_request=500,
        ephemeral_storage_limit=600,
        ephemeral_storage_request=600,
        cpu_limit=0.3,
        cpu_request=0.3,
    ),
    artifacts_download=ArtifactsDownload(
        artifacts=[
            TruefoundryArtifactSource(
                artifact_version_fqn=args.model_version_fqn,
                download_path_env_variable="MODEL_DOWNLOAD_PATH",
            )
        ]
    ),
)
service.deploy(workspace_fqn=args.workspace_fqn)
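
A hypothetical invocation of this deployment script, assuming it is run from the mnist-classifaction/deploy_model directory; the workspace, model version, and host values below are placeholders to replace with your own, and --path is optional:

python deploy.py \
  --workspace_fqn "<org>:<workspace-name>" \
  --model_version_fqn "<model-version-fqn>" \
  --host "<your-service-host>" \
  --path "/mnist"
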
32 changes: 32 additions & 0 deletions mnist-classifaction/deploy_model/fastapi_service.py
@@ -0,0 +1,32 @@
from fastapi import FastAPI
from pydantic import BaseModel
from predict import predict_fn, load_model
import tensorflow as tf
import numpy as np
import os

model_path = os.path.join(os.environ.get("MODEL_DOWNLOAD_PATH", "."), "mnist_model.h5")
model = load_model(model_path)

# root_path keeps docs and routes working when the service is mounted under a sub-path
app = FastAPI(docs_url="/", root_path=os.getenv("TFY_SERVICE_ROOT_PATH", "/"))


class ImageUrl(BaseModel):
    url: str = "https://conx.readthedocs.io/en/latest/_images/MNIST_6_0.png"


def load_image(img_url: str) -> np.ndarray:
    # Download the image and resize it to the 28x28 input expected by the model
    img_path = tf.keras.utils.get_file("image.jpg", img_url)
    img = tf.keras.preprocessing.image.load_img(img_path, target_size=(28, 28))
    img_arr = tf.keras.preprocessing.image.img_to_array(img)
    return img_arr


@app.post("/predict")
async def predict(body: ImageUrl):
    img_arr = load_image(body.url)
    prediction = predict_fn(model, img_arr)
    return {"prediction": prediction}
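
A minimal sketch of exercising the /predict endpoint, assuming the app is served locally with uvicorn (not pinned in requirements.txt) and that the requests package is available; both are assumptions for illustration only:

# Run the API first (assumption): uvicorn fastapi_service:app --port 8000
import requests

response = requests.post(
    "http://localhost:8000/predict",
    json={"url": "https://conx.readthedocs.io/en/latest/_images/MNIST_6_0.png"},
)
print(response.json())  # e.g. {"prediction": "6"}
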
20 changes: 20 additions & 0 deletions mnist-classifaction/deploy_model/gradio_demo.py
@@ -0,0 +1,20 @@
from predict import predict_fn, load_model
import os
import gradio as gr

model_path = os.path.join(os.environ.get("MODEL_DOWNLOAD_PATH", "."), "mnist_model.h5")
model = load_model(model_path)


def get_inference(img_arr):
    return predict_fn(model, img_arr)


interface = gr.Interface(
    fn=get_inference,
    inputs="image",
    outputs="label",
    examples=[["sample_images/0.jpg"], ["sample_images/1.jpg"]],
)

interface.launch(server_name="0.0.0.0", server_port=8000)
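
To try the Gradio demo locally (an assumption, not part of this commit), place the trained mnist_model.h5 in the working directory, since MODEL_DOWNLOAD_PATH defaults to ".", and start the script; it serves the UI on port 8000, matching the Port(port=8000) declared in deploy.py:

python gradio_demo.py
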
20 changes: 20 additions & 0 deletions mnist-classifaction/deploy_model/predict.py
@@ -0,0 +1,20 @@
import tensorflow as tf
import numpy as np


def load_model(model_path: str) -> tf.keras.Model:
    # Load the trained model
    model = tf.keras.models.load_model(model_path)
    return model


def predict_fn(model, img_arr: np.ndarray) -> str:
    # Preprocess the image before passing it to the model
    img_arr = tf.expand_dims(img_arr, 0)
    img_arr = img_arr[:, :, :, 0]  # Keep only the first channel (grayscale)

    # Make predictions
    predictions = model.predict(img_arr)
    predicted_label = tf.argmax(predictions[0]).numpy()

    return str(predicted_label)
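
A small local smoke test for these helpers, assuming the trained mnist_model.h5 and one of the sample images referenced by gradio_demo.py are present in the working directory (the file names here are assumptions):

# smoke_test.py (hypothetical, not part of this commit)
import tensorflow as tf

from predict import load_model, predict_fn

model = load_model("mnist_model.h5")
img = tf.keras.preprocessing.image.load_img("sample_images/0.jpg", target_size=(28, 28))
img_arr = tf.keras.preprocessing.image.img_to_array(img)
print(predict_fn(model, img_arr))  # expected to print a digit label such as "0"
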
3 changes: 3 additions & 0 deletions mnist-classifaction/deploy_model/requirements.txt
@@ -0,0 +1,3 @@
tensorflow==2.15.0
gradio==3.39.0
fastapi==0.89.1
File renamed without changes
File renamed without changes
18 changes: 7 additions & 11 deletions mnist-classifaction/train_job/deploy.py
@@ -16,18 +16,14 @@
             command="python train.py --num_epochs {{num_epochs}} --ml_repo {{ml_repo}}",
             requirements_path="requirements.txt",
         ),
-        build_source=LocalSource(local_build=False)
+        build_source=LocalSource(local_build=False),
     ),
     params=[
-        Param(name="num_epochs", default='4'),
-        Param(name="ml_repo", param_type="ml_repo"),
-    ],
+        Param(name="num_epochs", default="4"),
+        Param(name="ml_repo", param_type="ml_repo"),
+    ],
     resources=Resources(
-        cpu_request=0.5,
-        cpu_limit=0.5,
-        memory_request=1500,
-        memory_limit=2000
-    )
-
+        cpu_request=0.5, cpu_limit=0.5, memory_request=1500, memory_limit=2000
+    ),
 )
-deployment = job.deploy(workspace_fqn=args.workspace_fqn)
+deployment = job.deploy(workspace_fqn=args.workspace_fqn)
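
For reference, a hypothetical deployment of this training job, assuming --workspace_fqn is the only flag required at deploy time (only its use is visible in this hunk); num_epochs and ml_repo appear to be supplied at run time via the {{num_epochs}} and {{ml_repo}} placeholders in the job command:

python deploy.py --workspace_fqn "<org>:<workspace-name>"
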
50 changes: 27 additions & 23 deletions mnist-classifaction/train_job/train.py
@@ -8,12 +8,8 @@
 
 # parsing the arguments
 parser = argparse.ArgumentParser()
-parser.add_argument(
-    "--num_epochs", type=int, default=4
-)
-parser.add_argument(
-    "--ml_repo", type=str, required=True
-)
+parser.add_argument("--num_epochs", type=int, default=4)
+parser.add_argument("--ml_repo", type=str, required=True)
 args = parser.parse_args()
 
 
@@ -36,35 +32,45 @@
 # Plot some sample images
 plt.figure(figsize=(10, 5))
 for i in range(10):
-    plt.subplot(2, 5, i+1)
-    plt.imshow(x_train[i], cmap='gray')
+    plt.subplot(2, 5, i + 1)
+    plt.imshow(x_train[i], cmap="gray")
     plt.title(f"Label: {y_train[i]}")
-    plt.axis('off')
+    plt.axis("off")
 run.log_plots({"images": plt})
 plt.tight_layout()
 
 
 # Define the model architecture
-model = tf.keras.Sequential([
-    tf.keras.layers.Flatten(input_shape=(28, 28)),
-    tf.keras.layers.Dense(128, activation='relu'),
-    tf.keras.layers.Dense(10, activation='softmax')
-])
+model = tf.keras.Sequential(
+    [
+        tf.keras.layers.Flatten(input_shape=(28, 28)),
+        tf.keras.layers.Dense(128, activation="relu"),
+        tf.keras.layers.Dense(10, activation="softmax"),
+    ]
+)
 
 # Compile the model
-model.compile(optimizer='adam', loss='sparse_categorical_crossentropy', metrics=['accuracy'])
+model.compile(
+    optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"]
+)
 
-#logging the parameters
-run.log_params({"optimizer": "adam", "loss": "sparse_categorical_crossentropy", "metric": ["accuracy"]})
+# logging the parameters
+run.log_params(
+    {
+        "optimizer": "adam",
+        "loss": "sparse_categorical_crossentropy",
+        "metric": ["accuracy"],
+    }
+)
 
 # Train the model
 epochs = args.num_epochs
 model.fit(x_train, y_train, epochs=epochs, validation_data=(x_test, y_test))
 
 # Evaluate the model
 loss, accuracy = model.evaluate(x_test, y_test)
-print(f'Test loss: {loss}')
-print(f'Test accuracy: {accuracy}')
+print(f"Test loss: {loss}")
+print(f"Test accuracy: {accuracy}")
 
 
@@ -73,16 +79,14 @@
 run.log_metrics(metric_dict={"accuracy": accuracy, "loss": loss})
 
 # Save the trained model
-model.save('mnist_model.h5')
+model.save("mnist_model.h5")
 
 # Logging the model
 run.log_model(
     name="handwritten-digits-recognition",
-    model_file_or_folder='mnist_model.h5',
+    model_file_or_folder="mnist_model.h5",
     framework="tensorflow",
     description="sample model to recognize the handwritten digits",
     metadata={"accuracy": accuracy, "loss": loss},
     step=1,  # step number, useful when using iterative algorithms like SGD
 )
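
And a hypothetical local run of the training script itself, using the two arguments defined by its parser; the ml_repo name is a placeholder:

python train.py --num_epochs 4 --ml_repo <your-ml-repo>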


2 changes: 1 addition & 1 deletion mnist-classifaction/train_model.ipynb
@@ -6,7 +6,7 @@
     "id": "QOAnoPl-dlSY"
    },
    "source": [
-    "# Train and Deploy Model on Truefoundry\n",
+    "# Train Model on Truefoundry\n",
     "This notebook demonstrates a demo on how you can train an image classification model on mnist dataset and deploy the model training job on truefoundry platform."
    ]
   },
