diff --git a/clip/config.yaml b/clip/config.yaml
index a064b11..873618c 100644
--- a/clip/config.yaml
+++ b/clip/config.yaml
@@ -4,11 +4,11 @@ model_metadata:
   example_model_input:
     url: https://images.pexels.com/photos/1170986/pexels-photo-1170986.jpeg?auto=compress&cs=tinysrgb&w=1600
 model_name: clip-example
-python_version: py39
+python_version: py311
 requirements:
-- transformers==4.32.0
-- pillow==10.0.0
-- torch==2.0.1
+- transformers==4.47.1
+- pillow
+- torch
 resources:
   accelerator: A10G
   cpu: '3'
diff --git a/clip/model/model.py b/clip/model/model.py
index 5257317..90308ee 100644
--- a/clip/model/model.py
+++ b/clip/model/model.py
@@ -58,6 +58,7 @@ def preprocess(self, request: Dict) -> Dict:
         padding=True,
     )
     return request
+
 # The `predict` method performs the actual inference, and outputs a probability associated
 # with each of the labels defined earlier.
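
For reference, a minimal sketch of what the `predict` method described in the comment above might look like; the attribute name `self._model` and the response key `"predictions"` are assumptions for illustration, not taken from the diff:

import torch
from typing import Dict

def predict(self, request: Dict) -> Dict:
    # Run CLIP on the tensors produced by `preprocess`; `logits_per_image`
    # holds the similarity score between the image and each candidate label.
    with torch.no_grad():
        outputs = self._model(**request)
    # Softmax over the label dimension turns the similarity logits into
    # a probability for each label.
    probs = outputs.logits_per_image.softmax(dim=1).tolist()
    return {"predictions": probs}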