diff --git a/docs/_apps/swt-detection/v4.0/index.md b/docs/_apps/swt-detection/v4.0/index.md
new file mode 100644
index 0000000..560169a
--- /dev/null
+++ b/docs/_apps/swt-detection/v4.0/index.md
@@ -0,0 +1,59 @@
+---
+layout: posts
+classes: wide
+title: "Scenes-with-text Detection (v4.0)"
+date: 2024-02-26T18:27:42+00:00
+---
+## About this version
+
+* Submitter: [marcverhagen](https://github.com/marcverhagen)
+* Submission Time: 2024-02-26T18:27:42+00:00
+* Prebuilt Container Image: [ghcr.io/clamsproject/app-swt-detection:v4.0](https://github.com/clamsproject/app-swt-detection/pkgs/container/app-swt-detection/v4.0)
+* Release Notes
+
+  > Version 4.0
+
+## About this app (See raw [metadata.json](metadata.json))
+
+**Detects scenes with text, like slates, chyrons and credits.**
+
+* App ID: [http://apps.clams.ai/swt-detection/v4.0](http://apps.clams.ai/swt-detection/v4.0)
+* App License: Apache 2.0
+* Source Repository: [https://github.com/clamsproject/app-swt-detection](https://github.com/clamsproject/app-swt-detection) ([source tree of the submitted version](https://github.com/clamsproject/app-swt-detection/tree/v4.0))
+
+
+#### Inputs
+* [http://mmif.clams.ai/vocabulary/VideoDocument/v1](http://mmif.clams.ai/vocabulary/VideoDocument/v1) (required)
+(any properties)
+
+
+#### Configurable Parameters
+**(_Multivalued_ means the parameter can have one or more values.)**
+
+|Name|Description|Type|Multivalued|Default|Choices|
+|----|-----------|----|-----------|-------|-------|
+|startAt|Number of milliseconds into the video to start processing|integer|N|0||
+|stopAt|Number of milliseconds into the video to stop processing|integer|N|10000000||
+|sampleRate|Milliseconds between sampled frames|integer|N|1000||
+|minFrameScore|Minimum score for a still frame to be included in a TimeFrame|number|N|0.01||
+|minTimeframeScore|Minimum score for a TimeFrame|number|N|0.5||
+|minFrameCount|Minimum number of sampled frames required for a TimeFrame|integer|N|2||
+|pretty|The JSON body of the HTTP response will be re-formatted with 2-space indentation|boolean|N|false|**_`false`_**, `true`|
+
+
+#### Outputs
+**(Note that not all output annotations are always generated.)**
+* [http://mmif.clams.ai/vocabulary/TimeFrame/v2](http://mmif.clams.ai/vocabulary/TimeFrame/v2)
+    * _timeUnit_ = "milliseconds"
+    * _frameType_ = "bars"
+* [http://mmif.clams.ai/vocabulary/TimeFrame/v2](http://mmif.clams.ai/vocabulary/TimeFrame/v2)
+    * _timeUnit_ = "milliseconds"
+    * _frameType_ = "slate"
+* [http://mmif.clams.ai/vocabulary/TimeFrame/v2](http://mmif.clams.ai/vocabulary/TimeFrame/v2)
+    * _timeUnit_ = "milliseconds"
+    * _frameType_ = "chyron"
+* [http://mmif.clams.ai/vocabulary/TimeFrame/v2](http://mmif.clams.ai/vocabulary/TimeFrame/v2)
+    * _timeUnit_ = "milliseconds"
+    * _frameType_ = "credits"
+* [http://mmif.clams.ai/vocabulary/TimePoint/v1](http://mmif.clams.ai/vocabulary/TimePoint/v1)
+    * _timeUnit_ = "milliseconds"
diff --git a/docs/_apps/swt-detection/v4.0/metadata.json b/docs/_apps/swt-detection/v4.0/metadata.json
new file mode 100644
index 0000000..f253326
--- /dev/null
+++ b/docs/_apps/swt-detection/v4.0/metadata.json
@@ -0,0 +1,102 @@
+{
+  "name": "Scenes-with-text Detection",
+  "description": "Detects scenes with text, like slates, chyrons and credits.",
+  "app_version": "v4.0",
+  "mmif_version": "1.1.0",
+  "app_license": "Apache 2.0",
+  "identifier": "http://apps.clams.ai/swt-detection/v4.0",
+  "url": "https://github.com/clamsproject/app-swt-detection",
+  "input": [
+    {
+      "@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1",
"http://mmif.clams.ai/vocabulary/VideoDocument/v1", + "required": true + } + ], + "output": [ + { + "@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", + "properties": { + "timeUnit": "milliseconds", + "frameType": "bars" + } + }, + { + "@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", + "properties": { + "timeUnit": "milliseconds", + "frameType": "slate" + } + }, + { + "@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", + "properties": { + "timeUnit": "milliseconds", + "frameType": "chyron" + } + }, + { + "@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", + "properties": { + "timeUnit": "milliseconds", + "frameType": "credits" + } + }, + { + "@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1", + "properties": { + "timeUnit": "milliseconds" + } + } + ], + "parameters": [ + { + "name": "startAt", + "description": "Number of milliseconds into the video to start processing", + "type": "integer", + "default": 0, + "multivalued": false + }, + { + "name": "stopAt", + "description": "Number of milliseconds into the video to stop processing", + "type": "integer", + "default": 10000000, + "multivalued": false + }, + { + "name": "sampleRate", + "description": "Milliseconds between sampled frames", + "type": "integer", + "default": 1000, + "multivalued": false + }, + { + "name": "minFrameScore", + "description": "Minimum score for a still frame to be included in a TimeFrame", + "type": "number", + "default": 0.01, + "multivalued": false + }, + { + "name": "minTimeframeScore", + "description": "Minimum score for a TimeFrame", + "type": "number", + "default": 0.5, + "multivalued": false + }, + { + "name": "minFrameCount", + "description": "Minimum number of sampled frames required for a TimeFrame", + "type": "integer", + "default": 2, + "multivalued": false + }, + { + "name": "pretty", + "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", + "type": "boolean", + "default": 0, + "multivalued": false + } + ] +} \ No newline at end of file diff --git a/docs/_apps/swt-detection/v4.0/submission.json b/docs/_apps/swt-detection/v4.0/submission.json new file mode 100644 index 0000000..f3be5a2 --- /dev/null +++ b/docs/_apps/swt-detection/v4.0/submission.json @@ -0,0 +1,6 @@ +{ + "time": "2024-02-26T18:27:42+00:00", + "submitter": "marcverhagen", + "image": "ghcr.io/clamsproject/app-swt-detection:v4.0", + "releasenotes": "Version 4.0\n\n" +} diff --git a/docs/_data/app-index.json b/docs/_data/app-index.json index d632a46..11ae10a 100644 --- a/docs/_data/app-index.json +++ b/docs/_data/app-index.json @@ -1,4 +1,30 @@ { + "http://apps.clams.ai/swt-detection": { + "description": "Detects scenes with text, like slates, chyrons and credits.", + "latest_update": "2024-02-26T18:27:42+00:00", + "versions": [ + [ + "v4.0", + "marcverhagen" + ], + [ + "v3.0", + "marcverhagen" + ], + [ + "v2.1", + "marcverhagen" + ], + [ + "v2.0", + "marcverhagen" + ], + [ + "v1.0", + "marcverhagen" + ] + ] + }, "http://apps.clams.ai/whisper-wrapper": { "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "latest_update": "2024-02-12T19:47:39+00:00", @@ -29,28 +55,6 @@ ] ] }, - "http://apps.clams.ai/swt-detection": { - "description": "Detects scenes with text, like slates, chyrons and credits.", - "latest_update": "2024-02-07T21:18:36+00:00", - "versions": [ - [ - "v3.0", - "marcverhagen" - ], - [ - "v2.1", - "marcverhagen" - ], - [ - "v2.0", - "marcverhagen" - ], - [ - "v1.0", - "marcverhagen" - ] - ] - }, 
"http://apps.clams.ai/easyocr-wrapper": { "description": "Using EasyOCR to extract text from timeframes", "latest_update": "2024-01-30T19:59:06+00:00", diff --git a/docs/_data/apps.json b/docs/_data/apps.json index 5a48712..4ecc042 100644 --- a/docs/_data/apps.json +++ b/docs/_data/apps.json @@ -1 +1 @@ -[{"name": "CLAMS wrapper for spaCy NLP", "description": "Apply spaCy NLP to all text documents in a MMIF file.", "app_version": "v1", "mmif_version": "0.5.0", "analyzer_version": "3.1.2", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/spacy-wrapper/v1", "url": "https://github.com/clamsproject/app-spacy-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}, {"@type": "http://vocab.lappsgrid.org/Token", "required": false}], "output": [{"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Token#pos"}, {"@type": "http://vocab.lappsgrid.org/Token#lemma"}, {"@type": "http://vocab.lappsgrid.org/NounChunk"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}, {"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "pretokenized", "description": "Boolean parameter to set the app to use existing tokenization, if available, for text documents for NLP processing. Useful to process ASR documents, for example.", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v1", "mmif_version": "1.0.0", "analyzer_version": "20230314", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v1", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}]}, {"name": "AAPB-PUA Kaldi Wrapper", "description": "A CLAMS wrapper for Kaldi-based ASR software originally developed by PopUpArchive and hipstas, and later updated by Kyeongmin Rim at Brandeis University. Wrapped software can be found at https://github.com/brandeis-llc/aapb-pua-kaldi-docker . 
", "app_version": "v1", "mmif_version": "0.5.0", "analyzer_version": "v4", "app_license": "Apache 2.0", "analyzer_license": "UNKNOWN", "identifier": "http://apps.clams.ai/aapb-pua-kaldi-wrapper/v1", "url": "https://github.com/clamsproject/app-aapb-pua-kaldi-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "use_speech_segmentation", "description": "When true, the app looks for existing TimeFrame { \"frameType\": \"speech\" } annotations, and runs ASR only on those frames, instead of entire audio files.", "type": "boolean", "default": true, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "20230314", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v2", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}]}, {"name": "Brandeis ACS Wrapper", "description": "Brandeis Acoustic Classification & Segmentation (ACS) is a audio segmentation tool developed at Brandeis Lab for Linguistics and Computation. The original software can be found at https://github.com/brandeis-llc/acoustic-classification-segmentation .", "app_version": "v1", "mmif_version": "1.0.0", "analyzer_version": "1.11", "app_license": "Apache2.0", "analyzer_license": "Apache2.0", "identifier": "http://apps.clams.ai/brandeis-acs-wrapper/v1", "url": "https://github.com/clamsproject/app-brandeis-acs-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}]}, {"name": "AAPB-PUA Kaldi Wrapper", "description": "A CLAMS wrapper for Kaldi-based ASR software originally developed by PopUpArchive and hipstas, and later updated by Kyeongmin Rim at Brandeis University. Wrapped software can be found at https://github.com/brandeis-llc/aapb-pua-kaldi-docker . 
", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "v4", "app_license": "Apache 2.0", "analyzer_license": "UNKNOWN", "identifier": "http://apps.clams.ai/aapb-pua-kaldi-wrapper/v2", "url": "https://github.com/clamsproject/app-aapb-pua-kaldi-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "use_speech_segmentation", "description": "When true, the app looks for existing TimeFrame { \"frameType\": \"speech\" } annotations, and runs ASR only on those frames, instead of entire audio files.", "type": "boolean", "default": true, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v1.0", "url": "https://github.com/clams-project/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "string"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 540000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": 1, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Pyscenedetect Wrapper", "description": "", "app_version": "v1", "mmif_version": "1.0.0", "analyzer_version": "0.6.1", "app_license": "Apache2", "analyzer_license": "BSD-3", "identifier": "http://apps.clams.ai/pyscenedetect-wrapper/v1", "url": "https://github.com/clamsproject/app-pyscenedetect-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "shot", "timeUnit": "frame"}}], "parameters": [{"name": "mode", "description": "pick a scene detector algorithm, see http://scenedetect.com/projects/Manual/en/latest/cli/detectors.html", "type": "string", "choices": ["content", "threshold", "adaptive"], "default": "content", "multivalued": false}, {"name": "threshold", "description": "threshold value to use in the detection algorithm. 
Note that the meaning of this numerical value differs for different detector algorithms.", "type": "number", "default": 27, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v1.1", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v1.1", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "string"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 540000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": 1, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v1.2", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v1.2", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "string"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 540000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": 1, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "inaSpeechSegmenter Wrapper", "description": 
"inaSpeechSegmenter is a CNN-based audio segmentation toolkit. The original software can be found at https://github.com/ina-foss/inaSpeechSegmenter .", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "0.7.6", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/inaspeechsegmenter-wrapper/v1.0", "url": "https://github.com/clamsproject/app-inaspeechsegmenter-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v2.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v2.0", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "slate"}}}], "parameters": [{"name": "timeUnit", "description": "Unit of time to use in output.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "inaSpeechSegmenter Wrapper", "description": "inaSpeechSegmenter is a CNN-based audio segmentation toolkit. 
The original software can be found at https://github.com/ina-foss/inaSpeechSegmenter .", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "0.7.6", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/inaspeechsegmenter-wrapper/v1.1", "url": "https://github.com/clamsproject/app-inaspeechsegmenter-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Brandeis ACS Wrapper", "description": "Brandeis Acoustic Classification & Segmentation (ACS) is a audio segmentation tool developed at Brandeis Lab for Linguistics and Computation. The original software can be found at https://github.com/brandeis-llc/acoustic-classification-segmentation .", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "1.11", "app_license": "Apache2.0", "analyzer_license": "Apache2.0", "identifier": "http://apps.clams.ai/brandeis-acs-wrapper/v2", "url": "https://github.com/clamsproject/app-brandeis-acs-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v3", "mmif_version": "1.0.0", "analyzer_version": "20230314", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v3", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. 
Can be \"tiny\", \"base\", \"small\", \"medium\", or \"large\".", "type": "string", "choices": ["tiny", "base", "small", "medium", "large"], "default": "tiny", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Pyscenedetect Wrapper", "description": "", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "0.6.1", "app_license": "Apache2", "analyzer_license": "BSD-3", "identifier": "http://apps.clams.ai/pyscenedetect-wrapper/v2", "url": "https://github.com/clamsproject/app-pyscenedetect-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "shot", "timeUnit": "frame"}}], "parameters": [{"name": "mode", "description": "pick a scene detector algorithm, see http://scenedetect.com/projects/Manual/en/latest/cli/detectors.html", "type": "string", "choices": ["content", "threshold", "adaptive"], "default": "content", "multivalued": false}, {"name": "threshold", "description": "threshold value to use in the detection algorithm. Note that the meaning of this numerical value differs for different detector algorithms.", "type": "number", "default": 27, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "EAST Text Detection", "description": "OpenCV-based text localization app that used EAST text detection model. Please visit the source code repository for full documentation.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/east-textdetection/v1.0", "url": "https://github.com/clamsproject/app-east-textdetection", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/ImageDocument/v1", "required": true}], {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": false}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"bboxtype": "text"}}], "parameters": [{"name": "timeUnit", "description": "Unit for time points in the output. Only works with VideoDocument input.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "frameType", "description": "Segments of video to run on. Only works with VideoDocument input and TimeFrame input. Empty value means run on the every frame types.", "type": "string", "choices": ["", "slate", "chyron", "rolling-credit"], "default": "", "multivalued": true}, {"name": "sampleRatio", "description": "Frequency to sample frames. Only works with VideoDocument input, and without TimeFrame input. (when `TimeFrame` annotation is found, this parameter is ignored.)", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop running. Only works with VideoDocument input. 
The default is roughly 2 hours of video at 30fps.", "type": "integer", "default": "2 * 60 * 60 * 30", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Dbpedia Spotlight Wrapper", "description": "Apply named entity linking to all text documents in a MMIF file.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "version_1.0", "app_license": "Apache 2.0", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/dbpedia-spotlight-wrapper/v1.0", "url": "https://github.com/clamsproject/app-dbpedia-spotlight-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}], "output": [{"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "confidence", "description": "disambiguation confidence score for linking", "type": "number", "default": 0.5, "multivalued": false}, {"name": "support", "description": "resource prominence, i.e. number of in-links in Wikipedia (lower bound)", "type": "integer", "default": 0, "multivalued": false}, {"name": "types", "description": "limits recognition to certain types of named entities, e.g. DBpedia:Place", "type": "string", "multivalued": true}, {"name": "policy", "description": "(whitelist) selects all entities of the same type; (blacklist) selects all entities not of the same type", "type": "string", "choices": ["whitelist", "blacklist"], "default": "whitelist", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "CLAMS wrapper for spaCy NLP", "description": "Apply spaCy NLP to all text documents in a MMIF file.", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "3.6", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/spacy-wrapper/v1.1", "url": "https://github.com/clamsproject/app-spacy-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}, {"@type": "http://vocab.lappsgrid.org/Token", "required": false}], "output": [{"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Token#pos"}, {"@type": "http://vocab.lappsgrid.org/Token#lemma"}, {"@type": "http://vocab.lappsgrid.org/NounChunk"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}, {"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "pretokenized", "description": "Boolean parameter to set the app to use existing tokenization, if available, for text documents for NLP processing. 
Useful to process ASR documents, for example.", "type": "boolean", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Tone_Detector", "description": "Detects spans of monotonic audio within an audio file", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/tonedetection/v1.0", "url": "https://github.com/clamsproject/app-tonedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "tone"}}], "parameters": [{"name": "timeUnit", "description": "the unit for annotation output", "type": "string", "choices": ["seconds", "seconds", "milliseconds"], "default": "seconds", "multivalued": false}, {"name": "lengthThreshold", "description": "minimum length threshold (in ms)", "type": "integer", "default": 2000, "multivalued": false}, {"name": "sampleSize", "description": "length for each segment of samples to be compared", "type": "integer", "default": 512, "multivalued": false}, {"name": "stopAt", "description": "stop point for audio processing (in ms). Defaults to the length of the file", "type": "integer", "default": "None", "multivalued": false}, {"name": "tolerance", "description": "threshold value for a \"match\" within audio processing", "type": "number", "default": 1, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Gentle Forced Aligner Wrapper", "description": "This CLAMS app aligns transcript and audio track using Gentle. Gentle is a robust yet lenient forced aligner built on Kaldi.This app only works when Gentle is already installed locally.Unfortunately, Gentle is not distributed as a Python package distribution.To get Gentle installation instruction, see https://lowerquality.com/gentle/ Make sure install Gentle from the git commit specified in ``analyzer_version`` in this metadata.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "f29245a", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/gentle-forced-aligner-wrapper/v1.0", "url": "https://github.com/clamsproject/app-gentle-forced-aligner-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "speech"}, "required": false}, {"@type": "http://vocab.lappsgrid.org/Token", "required": false}], "output": [{"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "speech", "timeUnit": "milliseconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}], "parameters": [{"name": "use_speech_segmentation", "description": "When set true, use exising \"speech\"-typed ``TimeFrame`` annotations and run aligner only on those frames, instead of entire audio files.", "type": "boolean", "default": true, "multivalued": false}, {"name": "use_tokenization", "description": "When set true, ``Alignment`` annotation output will honor existing latest tokenization (``Token`` annotations). 
Due to a limitation of the way Kaldi reads in English tokens, existing tokens must not contain whitespaces. ", "type": "boolean", "default": true, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Chyron Detection", "description": "This tool detects chyrons, generates time segments.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/chyron-detection/v1.0", "url": "https://github.com/clamsproject/app-chyron-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "chyron"}}}], "parameters": [{"name": "timeUnit", "description": "unit for output timeframe", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames", "type": "integer", "default": 5, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential chyrons", "type": "number", "default": 0.5, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Dbpedia Spotlight Wrapper", "description": "Apply named entity linking to all text documents in a MMIF file.", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "daf5309", "app_license": "Apache 2.0", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/dbpedia-spotlight-wrapper/v1.1", "url": "https://github.com/clamsproject/app-dbpedia-spotlight-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}], "output": [{"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "confidence", "description": "disambiguation confidence score for linking", "type": "number", "default": 0.5, "multivalued": false}, {"name": "support", "description": "resource prominence, i.e. 
number of in-links in Wikipedia (lower bound)", "type": "integer", "default": 0, "multivalued": false}, {"name": "types", "description": "types filter", "type": "string", "multivalued": false}, {"name": "policy", "description": "(whitelist) selects all entities of the same type; (blacklist) selects all entities not of the same type", "type": "string", "choices": ["whitelist", "blacklist"], "default": "whitelist", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Tesseract OCR Wrapper", "description": "This tool applies Tesseract OCR to a video or image and generates text boxes and OCR results.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "tesseract4", "app_license": "MIT", "analyzer_license": "apache", "identifier": "http://apps.clams.ai/tesseractocr-wrapper/v1.0", "url": "https://github.com/clamsproject/app-tesseractocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"boxType": "text"}, "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": false}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}], "parameters": [{"name": "frameType", "description": "Use this to specify TimeFrame to use for filtering \"text\"-typed BoundingBox annotations. Can be \"slate\", \"chyron\", \"speech\", etc.. If not set, the app won't use TimeFrames for filtering.", "type": "string", "default": "", "multivalued": true}, {"name": "threshold", "description": "Use this value between 0 and 1 to filter out low-confidence text boxes.", "type": "number", "default": 0.9, "multivalued": false}, {"name": "psm", "description": "Tesseract Page Segmentation Modes. 
See https://tesseract-ocr.github.io/tessdoc/ImproveQuality.html#page-segmentation-method", "type": "integer", "choices": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Bars Detection", "description": "This tool detects SMPTE color bars.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/barsdetection/v1.0", "url": "https://github.com/clamsproject/app-barsdetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"typeSpecificProperty": {"frameType": "bars"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing.", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found.", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "minimum number of frames required for a timeframe to be included in the output.", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Parseq OCR Wrapper", "description": "This tool applies Parseq OCR to a video or image and generates text boxes and OCR results.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "bc8d95cd", "app_license": "MIT", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/parseqocr-wrapper/v1.0", "url": "https://github.com/clamsproject/app-parseqocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"boxType": "text"}, "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "EAST Text Detection", "description": "OpenCV-based text localization app that used EAST text detection model. 
Please visit the source code repository for full documentation.", "app_version": "v1.1", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/east-textdetection/v1.1", "url": "https://github.com/clamsproject/app-east-textdetection", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/ImageDocument/v1", "required": true}], {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": false}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"bboxtype": "text"}}], "parameters": [{"name": "timeUnit", "description": "Unit for time points in the output. Only works with VideoDocument input.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "frameType", "description": "Segments of video to run on. Only works with VideoDocument input and TimeFrame input. Empty value means run on the every frame types.", "type": "string", "choices": ["", "slate", "chyron", "rolling-credit"], "default": "", "multivalued": true}, {"name": "sampleRatio", "description": "Frequency to sample frames. Only works with VideoDocument input, and without TimeFrame input. (when `TimeFrame` annotation is found, this parameter is ignored.)", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop running. Only works with VideoDocument input. The default is roughly 2 hours of video at 30fps.", "type": "integer", "default": "2 * 60 * 60 * 30", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Bars Detection", "description": "This tool detects SMPTE color bars.", "app_version": "v1.1", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/barsdetection/v1.1", "url": "https://github.com/clamsproject/app-barsdetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"typeSpecificProperty": {"frameType": "bars"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing.", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found.", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "minimum number of frames required for a timeframe to be included in the output.", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Few Shot Classifier", "description": "This tool uses a vision model to classify video segments. 
Currenly supports \"chyron\" frame type.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "1.0", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/fewshotclassifier/v1.0", "url": "https://github.com/clamsproject/app-fewshotclassifier", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "string"}}], "parameters": [{"name": "timeUnit", "description": "Unit for output timeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output with a minimum value of 1", "type": "integer", "default": 60, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential labels.", "type": "number", "default": 0.8, "multivalued": false}, {"name": "finetunedFrameType", "description": "Name of fine-tuned model to use. All pre-installed models are named after the frame type they were fine-tuned for.\n\nIf an empty value is passed, the app will look for fewshots.csv file in the same directory as the app.py and create a new fine-tuned model based on the examples in that file.\n\nAt the moment, a model fine-tuned on \"chyron\" frame type is shipped as pre-installed.", "type": "string", "default": "chyron", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v2.1", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v2.1", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "slate"}}}], "parameters": [{"name": "timeUnit", "description": "Unit of time to use in output.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Dbpedia Spotlight Wrapper", "description": "Apply named entity linking to all text documents in a MMIF file.", 
"app_version": "v1.2", "mmif_version": "1.0.0", "analyzer_version": "daf5309", "app_license": "Apache 2.0", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/dbpedia-spotlight-wrapper/v1.2", "url": "https://github.com/clamsproject/app-dbpedia-spotlight-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}], "output": [{"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "confidence", "description": "disambiguation confidence score for linking", "type": "number", "default": 0.5, "multivalued": false}, {"name": "support", "description": "resource prominence, i.e. number of in-links in Wikipedia (lower bound)", "type": "integer", "default": 0, "multivalued": false}, {"name": "types", "description": "limits recognition to certain types of named entities, e.g. DBpedia:Place", "type": "string", "multivalued": true}, {"name": "policy", "description": "(whitelist) selects all entities of the same type; (blacklist) selects all entities not of the same type", "type": "string", "choices": ["whitelist", "blacklist"], "default": "whitelist", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scene-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v1.0", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1"}], "parameters": [{"name": "model", "description": "the model to use, not implemented yet", "type": "string", "default": "vgg16", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scenes-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v2.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v2.0", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1"}], "parameters": [{"name": "sampleRate", "description": "Milliseconds between sampled frames", "type": "integer", "default": 1000, "multivalued": false}, {"name": "minFrameScore", "description": "Minimum score for a still frame to be included in a TimeFrame", "type": "number", "default": 0.01, "multivalued": false}, {"name": "minTimeframeScore", "description": "Minimum score for a TimeFrame", "type": "number", "default": 0.25, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of sampled frames required for a TimeFrame", "type": "integer", "default": 2, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v4", "mmif_version": 
"1.0.0", "analyzer_version": "20231117", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v4", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. Can be \"tiny\", \"base\", \"small\", \"medium\", or \"large\".", "type": "string", "choices": ["tiny", "base", "small", "medium", "large"], "default": "tiny", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Easyocr Wrapper", "description": "Using EasyOCR to extract text from timeframes", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "1.7.0", "app_license": "MIT", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/easyocr-wrapper/v1.0", "url": "https://github.com/clamsproject/app-easyocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1"}], "parameters": [{"name": "sampleFrames", "description": "Number of frames to sample from timeframe", "type": "integer", "default": 1, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scenes-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v2.1", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v2.1", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds"}}], "parameters": [{"name": "sampleRate", "description": "Milliseconds between sampled frames", "type": "integer", "default": 1000, "multivalued": false}, {"name": "minFrameScore", "description": "Minimum score for a still frame to be included in a TimeFrame", "type": "number", "default": 0.01, "multivalued": false}, {"name": "minTimeframeScore", "description": "Minimum score for a TimeFrame", "type": "number", "default": 0.25, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of sampled frames required for a TimeFrame", "type": "integer", "default": 2, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Easyocr Wrapper", 
"description": "Using EasyOCR to extract text from timeframes", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "1.7.0", "app_license": "MIT", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/easyocr-wrapper/v1.1", "url": "https://github.com/clamsproject/app-easyocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1"}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scenes-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v3.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v3.0", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "bars"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "slate"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "chyron"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "credits"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1", "properties": {"timeUnit": "milliseconds"}}], "parameters": [{"name": "startAt", "description": "Number of milliseconds into the video to start processing", "type": "integer", "default": 0, "multivalued": false}, {"name": "stopAt", "description": "Number of milliseconds into the video to stop processing", "type": "integer", "default": 10000000, "multivalued": false}, {"name": "sampleRate", "description": "Milliseconds between sampled frames", "type": "integer", "default": 1000, "multivalued": false}, {"name": "minFrameScore", "description": "Minimum score for a still frame to be included in a TimeFrame", "type": "number", "default": 0.01, "multivalued": false}, {"name": "minTimeframeScore", "description": "Minimum score for a TimeFrame", "type": "number", "default": 0.25, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of sampled frames required for a TimeFrame", "type": "integer", "default": 2, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v5", "mmif_version": "1.0.0", "analyzer_version": "20231117", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v5", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": 
"http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "millisecond"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. Can be \"tiny\", \"base\", \"small\", \"medium\", or \"large\".", "type": "string", "choices": ["tiny", "base", "small", "medium", "large"], "default": "tiny", "multivalued": false}, {"name": "modelLang", "description": "Language of the model to use, accepts two- or three-letter ISO 639 language codes, however Whisper only supports a subset of languages. If the language is not supported, error will be raised.For the full list of supported languages, see https://github.com/openai/whisper/blob/20231117/whisper/tokenizer.py . In addition to the langauge code, two-letter region codes can be added to the language code, e.g. \"en-US\" for US English. Note that the region code is only for compatibility and recording purpose, and Whisper neither detects regional dialects, nor use the given one for transcription. When the langauge code is not given, Whisper will run in langauge detection mode, and will use first few seconds of the audio to detect the language.", "type": "string", "default": "", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v6", "mmif_version": "1.1.0", "analyzer_version": "20231117", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v6", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", "properties": {"timeUnit": "millisecond"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. When `modelLand=en` is given, for non-`large` models, English-only models will be used instead of multilingual models for speed and accuracy. (For `large` models, English-only models are not available.)", "type": "string", "choices": ["tiny", true, "base", "b", "small", "s", "medium", "m", "large", "l", "large-v2", "l2", "large-v3", "l3"], "default": "tiny", "multivalued": false}, {"name": "modelLang", "description": "Language of the model to use, accepts two- or three-letter ISO 639 language codes, however Whisper only supports a subset of languages. If the language is not supported, error will be raised.For the full list of supported languages, see https://github.com/openai/whisper/blob/20231117/whisper/tokenizer.py . In addition to the langauge code, two-letter region codes can be added to the language code, e.g. \"en-US\" for US English. 
Note that the region code is only for compatibility and recording purpose, and Whisper neither detects regional dialects, nor use the given one for transcription. When the langauge code is not given, Whisper will run in langauge detection mode, and will use first few seconds of the audio to detect the language.", "type": "string", "default": "", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}] \ No newline at end of file +[{"name": "CLAMS wrapper for spaCy NLP", "description": "Apply spaCy NLP to all text documents in a MMIF file.", "app_version": "v1", "mmif_version": "0.5.0", "analyzer_version": "3.1.2", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/spacy-wrapper/v1", "url": "https://github.com/clamsproject/app-spacy-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}, {"@type": "http://vocab.lappsgrid.org/Token", "required": false}], "output": [{"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Token#pos"}, {"@type": "http://vocab.lappsgrid.org/Token#lemma"}, {"@type": "http://vocab.lappsgrid.org/NounChunk"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}, {"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "pretokenized", "description": "Boolean parameter to set the app to use existing tokenization, if available, for text documents for NLP processing. Useful to process ASR documents, for example.", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v1", "mmif_version": "1.0.0", "analyzer_version": "20230314", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v1", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}]}, {"name": "AAPB-PUA Kaldi Wrapper", "description": "A CLAMS wrapper for Kaldi-based ASR software originally developed by PopUpArchive and hipstas, and later updated by Kyeongmin Rim at Brandeis University. Wrapped software can be found at https://github.com/brandeis-llc/aapb-pua-kaldi-docker . 
", "app_version": "v1", "mmif_version": "0.5.0", "analyzer_version": "v4", "app_license": "Apache 2.0", "analyzer_license": "UNKNOWN", "identifier": "http://apps.clams.ai/aapb-pua-kaldi-wrapper/v1", "url": "https://github.com/clamsproject/app-aapb-pua-kaldi-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "use_speech_segmentation", "description": "When true, the app looks for existing TimeFrame { \"frameType\": \"speech\" } annotations, and runs ASR only on those frames, instead of entire audio files.", "type": "boolean", "default": true, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "20230314", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v2", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}]}, {"name": "Brandeis ACS Wrapper", "description": "Brandeis Acoustic Classification & Segmentation (ACS) is a audio segmentation tool developed at Brandeis Lab for Linguistics and Computation. The original software can be found at https://github.com/brandeis-llc/acoustic-classification-segmentation .", "app_version": "v1", "mmif_version": "1.0.0", "analyzer_version": "1.11", "app_license": "Apache2.0", "analyzer_license": "Apache2.0", "identifier": "http://apps.clams.ai/brandeis-acs-wrapper/v1", "url": "https://github.com/clamsproject/app-brandeis-acs-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}]}, {"name": "AAPB-PUA Kaldi Wrapper", "description": "A CLAMS wrapper for Kaldi-based ASR software originally developed by PopUpArchive and hipstas, and later updated by Kyeongmin Rim at Brandeis University. Wrapped software can be found at https://github.com/brandeis-llc/aapb-pua-kaldi-docker . 
", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "v4", "app_license": "Apache 2.0", "analyzer_license": "UNKNOWN", "identifier": "http://apps.clams.ai/aapb-pua-kaldi-wrapper/v2", "url": "https://github.com/clamsproject/app-aapb-pua-kaldi-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "use_speech_segmentation", "description": "When true, the app looks for existing TimeFrame { \"frameType\": \"speech\" } annotations, and runs ASR only on those frames, instead of entire audio files.", "type": "boolean", "default": true, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v1.0", "url": "https://github.com/clams-project/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "string"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 540000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": 1, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Pyscenedetect Wrapper", "description": "", "app_version": "v1", "mmif_version": "1.0.0", "analyzer_version": "0.6.1", "app_license": "Apache2", "analyzer_license": "BSD-3", "identifier": "http://apps.clams.ai/pyscenedetect-wrapper/v1", "url": "https://github.com/clamsproject/app-pyscenedetect-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "shot", "timeUnit": "frame"}}], "parameters": [{"name": "mode", "description": "pick a scene detector algorithm, see http://scenedetect.com/projects/Manual/en/latest/cli/detectors.html", "type": "string", "choices": ["content", "threshold", "adaptive"], "default": "content", "multivalued": false}, {"name": "threshold", "description": "threshold value to use in the detection algorithm. 
Note that the meaning of this numerical value differs for different detector algorithms.", "type": "number", "default": 27, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v1.1", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v1.1", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "string"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 540000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": 1, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v1.2", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v1.2", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "string"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output typeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 540000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": 1, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "inaSpeechSegmenter Wrapper", "description": 
"inaSpeechSegmenter is a CNN-based audio segmentation toolkit. The original software can be found at https://github.com/ina-foss/inaSpeechSegmenter .", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "0.7.6", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/inaspeechsegmenter-wrapper/v1.0", "url": "https://github.com/clamsproject/app-inaspeechsegmenter-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v2.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v2.0", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "slate"}}}], "parameters": [{"name": "timeUnit", "description": "Unit of time to use in output.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "inaSpeechSegmenter Wrapper", "description": "inaSpeechSegmenter is a CNN-based audio segmentation toolkit. 
The original software can be found at https://github.com/ina-foss/inaSpeechSegmenter .", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "0.7.6", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/inaspeechsegmenter-wrapper/v1.1", "url": "https://github.com/clamsproject/app-inaspeechsegmenter-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Brandeis ACS Wrapper", "description": "Brandeis Acoustic Classification & Segmentation (ACS) is an audio segmentation tool developed at Brandeis Lab for Linguistics and Computation. The original software can be found at https://github.com/brandeis-llc/acoustic-classification-segmentation .", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "1.11", "app_license": "Apache2.0", "analyzer_license": "Apache2.0", "identifier": "http://apps.clams.ai/brandeis-acs-wrapper/v2", "url": "https://github.com/clamsproject/app-brandeis-acs-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeunit": "milliseconds"}}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v3", "mmif_version": "1.0.0", "analyzer_version": "20230314", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v3", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. 
Can be \"tiny\", \"base\", \"small\", \"medium\", or \"large\".", "type": "string", "choices": ["tiny", "base", "small", "medium", "large"], "default": "tiny", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Pyscenedetect Wrapper", "description": "", "app_version": "v2", "mmif_version": "1.0.0", "analyzer_version": "0.6.1", "app_license": "Apache2", "analyzer_license": "BSD-3", "identifier": "http://apps.clams.ai/pyscenedetect-wrapper/v2", "url": "https://github.com/clamsproject/app-pyscenedetect-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "shot", "timeUnit": "frame"}}], "parameters": [{"name": "mode", "description": "pick a scene detector algorithm, see http://scenedetect.com/projects/Manual/en/latest/cli/detectors.html", "type": "string", "choices": ["content", "threshold", "adaptive"], "default": "content", "multivalued": false}, {"name": "threshold", "description": "threshold value to use in the detection algorithm. Note that the meaning of this numerical value differs for different detector algorithms.", "type": "number", "default": 27, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "EAST Text Detection", "description": "OpenCV-based text localization app that uses the EAST text detection model. Please visit the source code repository for full documentation.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/east-textdetection/v1.0", "url": "https://github.com/clamsproject/app-east-textdetection", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/ImageDocument/v1", "required": true}], {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": false}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"bboxtype": "text"}}], "parameters": [{"name": "timeUnit", "description": "Unit for time points in the output. Only works with VideoDocument input.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "frameType", "description": "Segments of video to run on. Only works with VideoDocument input and TimeFrame input. Empty value means run on every frame type.", "type": "string", "choices": ["", "slate", "chyron", "rolling-credit"], "default": "", "multivalued": true}, {"name": "sampleRatio", "description": "Frequency to sample frames. Only works with VideoDocument input, and without TimeFrame input. (When a `TimeFrame` annotation is found, this parameter is ignored.)", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop running. Only works with VideoDocument input. 
The default is roughly 2 hours of video at 30fps.", "type": "integer", "default": "2 * 60 * 60 * 30", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Dbpedia Spotlight Wrapper", "description": "Apply named entity linking to all text documents in a MMIF file.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "version_1.0", "app_license": "Apache 2.0", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/dbpedia-spotlight-wrapper/v1.0", "url": "https://github.com/clamsproject/app-dbpedia-spotlight-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}], "output": [{"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "confidence", "description": "disambiguation confidence score for linking", "type": "number", "default": 0.5, "multivalued": false}, {"name": "support", "description": "resource prominence, i.e. number of in-links in Wikipedia (lower bound)", "type": "integer", "default": 0, "multivalued": false}, {"name": "types", "description": "limits recognition to certain types of named entities, e.g. DBpedia:Place", "type": "string", "multivalued": true}, {"name": "policy", "description": "(whitelist) selects all entities of the same type; (blacklist) selects all entities not of the same type", "type": "string", "choices": ["whitelist", "blacklist"], "default": "whitelist", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "CLAMS wrapper for spaCy NLP", "description": "Apply spaCy NLP to all text documents in a MMIF file.", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "3.6", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/spacy-wrapper/v1.1", "url": "https://github.com/clamsproject/app-spacy-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}, {"@type": "http://vocab.lappsgrid.org/Token", "required": false}], "output": [{"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Token#pos"}, {"@type": "http://vocab.lappsgrid.org/Token#lemma"}, {"@type": "http://vocab.lappsgrid.org/NounChunk"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}, {"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "pretokenized", "description": "Boolean parameter to set the app to use existing tokenization, if available, for text documents for NLP processing. 
Useful to process ASR documents, for example.", "type": "boolean", "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Tone_Detector", "description": "Detects spans of monotonic audio within an audio file", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/tonedetection/v1.0", "url": "https://github.com/clamsproject/app-tonedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "tone"}}], "parameters": [{"name": "timeUnit", "description": "the unit for annotation output", "type": "string", "choices": ["seconds", "milliseconds"], "default": "seconds", "multivalued": false}, {"name": "lengthThreshold", "description": "minimum length threshold (in ms)", "type": "integer", "default": 2000, "multivalued": false}, {"name": "sampleSize", "description": "length for each segment of samples to be compared", "type": "integer", "default": 512, "multivalued": false}, {"name": "stopAt", "description": "stop point for audio processing (in ms). Defaults to the length of the file", "type": "integer", "default": "None", "multivalued": false}, {"name": "tolerance", "description": "threshold value for a \"match\" within audio processing", "type": "number", "default": 1, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Gentle Forced Aligner Wrapper", "description": "This CLAMS app aligns a transcript and an audio track using Gentle. Gentle is a robust yet lenient forced aligner built on Kaldi. This app only works when Gentle is already installed locally. Unfortunately, Gentle is not distributed as a Python package distribution. To get Gentle installation instructions, see https://lowerquality.com/gentle/ . Make sure to install Gentle from the git commit specified in ``analyzer_version`` in this metadata.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "f29245a", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/gentle-forced-aligner-wrapper/v1.0", "url": "https://github.com/clamsproject/app-gentle-forced-aligner-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "speech"}, "required": false}, {"@type": "http://vocab.lappsgrid.org/Token", "required": false}], "output": [{"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "speech", "timeUnit": "milliseconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}], "parameters": [{"name": "use_speech_segmentation", "description": "When set true, use existing \"speech\"-typed ``TimeFrame`` annotations and run the aligner only on those frames, instead of entire audio files.", "type": "boolean", "default": true, "multivalued": false}, {"name": "use_tokenization", "description": "When set true, ``Alignment`` annotation output will honor the latest existing tokenization (``Token`` annotations). 
Due to a limitation of the way Kaldi reads in English tokens, existing tokens must not contain whitespace. ", "type": "boolean", "default": true, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Chyron Detection", "description": "This tool detects chyrons and generates time segments.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/chyron-detection/v1.0", "url": "https://github.com/clamsproject/app-chyron-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "chyron"}}}], "parameters": [{"name": "timeUnit", "description": "unit for output timeframe", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames", "type": "integer", "default": 5, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential chyrons", "type": "number", "default": 0.5, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Dbpedia Spotlight Wrapper", "description": "Apply named entity linking to all text documents in a MMIF file.", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "daf5309", "app_license": "Apache 2.0", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/dbpedia-spotlight-wrapper/v1.1", "url": "https://github.com/clamsproject/app-dbpedia-spotlight-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}], "output": [{"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "confidence", "description": "disambiguation confidence score for linking", "type": "number", "default": 0.5, "multivalued": false}, {"name": "support", "description": "resource prominence, i.e. 
number of in-links in Wikipedia (lower bound)", "type": "integer", "default": 0, "multivalued": false}, {"name": "types", "description": "types filter", "type": "string", "multivalued": false}, {"name": "policy", "description": "(whitelist) selects all entities of the same type; (blacklist) selects all entities not of the same type", "type": "string", "choices": ["whitelist", "blacklist"], "default": "whitelist", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Tesseract OCR Wrapper", "description": "This tool applies Tesseract OCR to a video or image and generates text boxes and OCR results.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "tesseract4", "app_license": "MIT", "analyzer_license": "apache", "identifier": "http://apps.clams.ai/tesseractocr-wrapper/v1.0", "url": "https://github.com/clamsproject/app-tesseractocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"boxType": "text"}, "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": false}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}], "parameters": [{"name": "frameType", "description": "Use this to specify TimeFrame to use for filtering \"text\"-typed BoundingBox annotations. Can be \"slate\", \"chyron\", \"speech\", etc. If not set, the app won't use TimeFrames for filtering.", "type": "string", "default": "", "multivalued": true}, {"name": "threshold", "description": "Use this value between 0 and 1 to filter out low-confidence text boxes.", "type": "number", "default": 0.9, "multivalued": false}, {"name": "psm", "description": "Tesseract Page Segmentation Modes. 
See https://tesseract-ocr.github.io/tessdoc/ImproveQuality.html#page-segmentation-method", "type": "integer", "choices": [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13], "default": 0, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Bars Detection", "description": "This tool detects SMPTE color bars.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/barsdetection/v1.0", "url": "https://github.com/clamsproject/app-barsdetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"typeSpecificProperty": {"frameType": "bars"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output timeframe.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing.", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found.", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output.", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential bars.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Parseq OCR Wrapper", "description": "This tool applies Parseq OCR to a video or image and generates text boxes and OCR results.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "bc8d95cd", "app_license": "MIT", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/parseqocr-wrapper/v1.0", "url": "https://github.com/clamsproject/app-parseqocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"boxType": "text"}, "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "EAST Text Detection", "description": "OpenCV-based text localization app that uses the EAST text detection model. 
Please visit the source code repository for full documentation.", "app_version": "v1.1", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/east-textdetection/v1.1", "url": "https://github.com/clamsproject/app-east-textdetection", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/ImageDocument/v1", "required": true}], {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": false}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1", "properties": {"bboxtype": "text"}}], "parameters": [{"name": "timeUnit", "description": "Unit for time points in the output. Only works with VideoDocument input.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "frameType", "description": "Segments of video to run on. Only works with VideoDocument input and TimeFrame input. Empty value means run on every frame type.", "type": "string", "choices": ["", "slate", "chyron", "rolling-credit"], "default": "", "multivalued": true}, {"name": "sampleRatio", "description": "Frequency to sample frames. Only works with VideoDocument input, and without TimeFrame input. (When a `TimeFrame` annotation is found, this parameter is ignored.)", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop running. Only works with VideoDocument input. The default is roughly 2 hours of video at 30fps.", "type": "integer", "default": "2 * 60 * 60 * 30", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Bars Detection", "description": "This tool detects SMPTE color bars.", "app_version": "v1.1", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/barsdetection/v1.1", "url": "https://github.com/clamsproject/app-barsdetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"typeSpecificProperty": {"frameType": "bars"}}}], "parameters": [{"name": "timeUnit", "description": "Unit for output timeframe.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing.", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found.", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output.", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential bars.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Few Shot Classifier", "description": "This tool uses a vision model to classify video segments. 
Currently supports \"chyron\" frame type.", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "1.0", "app_license": "MIT", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/fewshotclassifier/v1.0", "url": "https://github.com/clamsproject/app-fewshotclassifier", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"frameType": "string"}}], "parameters": [{"name": "timeUnit", "description": "Unit for output timeframe", "type": "string", "choices": ["frames", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output with a minimum value of 1", "type": "integer", "default": 60, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential labels.", "type": "number", "default": 0.8, "multivalued": false}, {"name": "finetunedFrameType", "description": "Name of fine-tuned model to use. All pre-installed models are named after the frame type they were fine-tuned for.\n\nIf an empty value is passed, the app will look for fewshots.csv file in the same directory as the app.py and create a new fine-tuned model based on the examples in that file.\n\nAt the moment, a model fine-tuned on \"chyron\" frame type is shipped as pre-installed.", "type": "string", "default": "chyron", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Slate Detection", "description": "This tool detects slates.", "app_version": "v2.1", "mmif_version": "1.0.0", "app_license": "MIT", "identifier": "http://apps.clams.ai/slatedetection/v2.1", "url": "https://github.com/clamsproject/app-slatedetection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"properties": {"frameType": "slate"}}}], "parameters": [{"name": "timeUnit", "description": "Unit of time to use in output.", "type": "string", "choices": ["frames", "seconds", "milliseconds"], "default": "frames", "multivalued": false}, {"name": "sampleRatio", "description": "Frequency to sample frames.", "type": "integer", "default": 30, "multivalued": false}, {"name": "stopAt", "description": "Frame number to stop processing", "type": "integer", "default": 9000, "multivalued": false}, {"name": "stopAfterOne", "description": "When True, processing stops after first timeframe is found", "type": "boolean", "default": true, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of frames required for a timeframe to be included in the output", "type": "integer", "default": 10, "multivalued": false}, {"name": "threshold", "description": "Threshold from 0-1, lower accepts more potential slates.", "type": "number", "default": 0.7, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Dbpedia Spotlight Wrapper", "description": "Apply named entity linking to all text documents in a MMIF file.", 
"app_version": "v1.2", "mmif_version": "1.0.0", "analyzer_version": "daf5309", "app_license": "Apache 2.0", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/dbpedia-spotlight-wrapper/v1.2", "url": "https://github.com/clamsproject/app-dbpedia-spotlight-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1", "required": true}], "output": [{"@type": "http://vocab.lappsgrid.org/NamedEntity"}], "parameters": [{"name": "confidence", "description": "disambiguation confidence score for linking", "type": "number", "default": 0.5, "multivalued": false}, {"name": "support", "description": "resource prominence, i.e. number of in-links in Wikipedia (lower bound)", "type": "integer", "default": 0, "multivalued": false}, {"name": "types", "description": "limits recognition to certain types of named entities, e.g. DBpedia:Place", "type": "string", "multivalued": true}, {"name": "policy", "description": "(whitelist) selects all entities of the same type; (blacklist) selects all entities not of the same type", "type": "string", "choices": ["whitelist", "blacklist"], "default": "whitelist", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scene-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v1.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v1.0", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1"}], "parameters": [{"name": "model", "description": "the model to use, not implemented yet", "type": "string", "default": "vgg16", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scenes-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v2.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v2.0", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1"}], "parameters": [{"name": "sampleRate", "description": "Milliseconds between sampled frames", "type": "integer", "default": 1000, "multivalued": false}, {"name": "minFrameScore", "description": "Minimum score for a still frame to be included in a TimeFrame", "type": "number", "default": 0.01, "multivalued": false}, {"name": "minTimeframeScore", "description": "Minimum score for a TimeFrame", "type": "number", "default": 0.25, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of sampled frames required for a TimeFrame", "type": "integer", "default": 2, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v4", "mmif_version": 
"1.0.0", "analyzer_version": "20231117", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v4", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "seconds"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. Can be \"tiny\", \"base\", \"small\", \"medium\", or \"large\".", "type": "string", "choices": ["tiny", "base", "small", "medium", "large"], "default": "tiny", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Easyocr Wrapper", "description": "Using EasyOCR to extract text from timeframes", "app_version": "v1.0", "mmif_version": "1.0.0", "analyzer_version": "1.7.0", "app_license": "MIT", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/easyocr-wrapper/v1.0", "url": "https://github.com/clamsproject/app-easyocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1"}], "parameters": [{"name": "sampleFrames", "description": "Number of frames to sample from timeframe", "type": "integer", "default": 1, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scenes-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v2.1", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v2.1", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds"}}], "parameters": [{"name": "sampleRate", "description": "Milliseconds between sampled frames", "type": "integer", "default": 1000, "multivalued": false}, {"name": "minFrameScore", "description": "Minimum score for a still frame to be included in a TimeFrame", "type": "number", "default": 0.01, "multivalued": false}, {"name": "minTimeframeScore", "description": "Minimum score for a TimeFrame", "type": "number", "default": 0.25, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of sampled frames required for a TimeFrame", "type": "integer", "default": 2, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Easyocr Wrapper", 
"description": "Using EasyOCR to extract text from timeframes", "app_version": "v1.1", "mmif_version": "1.0.0", "analyzer_version": "1.7.0", "app_license": "MIT", "analyzer_license": "Apache 2.0", "identifier": "http://apps.clams.ai/easyocr-wrapper/v1.1", "url": "https://github.com/clamsproject/app-easyocr-wrapper", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/BoundingBox/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1"}], "parameters": [{"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scenes-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v3.0", "mmif_version": "1.0.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v3.0", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "bars"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "slate"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "chyron"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "milliseconds", "frameType": "credits"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1", "properties": {"timeUnit": "milliseconds"}}], "parameters": [{"name": "startAt", "description": "Number of milliseconds into the video to start processing", "type": "integer", "default": 0, "multivalued": false}, {"name": "stopAt", "description": "Number of milliseconds into the video to stop processing", "type": "integer", "default": 10000000, "multivalued": false}, {"name": "sampleRate", "description": "Milliseconds between sampled frames", "type": "integer", "default": 1000, "multivalued": false}, {"name": "minFrameScore", "description": "Minimum score for a still frame to be included in a TimeFrame", "type": "number", "default": 0.01, "multivalued": false}, {"name": "minTimeframeScore", "description": "Minimum score for a TimeFrame", "type": "number", "default": 0.25, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of sampled frames required for a TimeFrame", "type": "integer", "default": 2, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v5", "mmif_version": "1.0.0", "analyzer_version": "20231117", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v5", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": 
"http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v1", "properties": {"timeUnit": "millisecond"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. Can be \"tiny\", \"base\", \"small\", \"medium\", or \"large\".", "type": "string", "choices": ["tiny", "base", "small", "medium", "large"], "default": "tiny", "multivalued": false}, {"name": "modelLang", "description": "Language of the model to use, accepts two- or three-letter ISO 639 language codes, however Whisper only supports a subset of languages. If the language is not supported, error will be raised.For the full list of supported languages, see https://github.com/openai/whisper/blob/20231117/whisper/tokenizer.py . In addition to the langauge code, two-letter region codes can be added to the language code, e.g. \"en-US\" for US English. Note that the region code is only for compatibility and recording purpose, and Whisper neither detects regional dialects, nor use the given one for transcription. When the langauge code is not given, Whisper will run in langauge detection mode, and will use first few seconds of the audio to detect the language.", "type": "string", "default": "", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Whisper Wrapper", "description": "A CLAMS wrapper for Whisper-based ASR software originally developed by OpenAI.", "app_version": "v6", "mmif_version": "1.1.0", "analyzer_version": "20231117", "app_license": "Apache 2.0", "analyzer_license": "MIT", "identifier": "http://apps.clams.ai/whisper-wrapper/v6", "url": "https://github.com/clamsproject/app-whisper-wrapper", "input": [[{"@type": "http://mmif.clams.ai/vocabulary/AudioDocument/v1", "required": true}, {"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}]], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TextDocument/v1"}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", "properties": {"timeUnit": "millisecond"}}, {"@type": "http://mmif.clams.ai/vocabulary/Alignment/v1"}, {"@type": "http://vocab.lappsgrid.org/Token"}, {"@type": "http://vocab.lappsgrid.org/Sentence"}], "parameters": [{"name": "modelSize", "description": "The size of the model to use. When `modelLand=en` is given, for non-`large` models, English-only models will be used instead of multilingual models for speed and accuracy. (For `large` models, English-only models are not available.)", "type": "string", "choices": ["tiny", true, "base", "b", "small", "s", "medium", "m", "large", "l", "large-v2", "l2", "large-v3", "l3"], "default": "tiny", "multivalued": false}, {"name": "modelLang", "description": "Language of the model to use, accepts two- or three-letter ISO 639 language codes, however Whisper only supports a subset of languages. If the language is not supported, error will be raised.For the full list of supported languages, see https://github.com/openai/whisper/blob/20231117/whisper/tokenizer.py . In addition to the langauge code, two-letter region codes can be added to the language code, e.g. \"en-US\" for US English. 
Note that the region code is only for compatibility and recording purpose, and Whisper neither detects regional dialects, nor use the given one for transcription. When the langauge code is not given, Whisper will run in langauge detection mode, and will use first few seconds of the audio to detect the language.", "type": "string", "default": "", "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}, {"name": "Scenes-with-text Detection", "description": "Detects scenes with text, like slates, chyrons and credits.", "app_version": "v4.0", "mmif_version": "1.1.0", "app_license": "Apache 2.0", "identifier": "http://apps.clams.ai/swt-detection/v4.0", "url": "https://github.com/clamsproject/app-swt-detection", "input": [{"@type": "http://mmif.clams.ai/vocabulary/VideoDocument/v1", "required": true}], "output": [{"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", "properties": {"timeUnit": "milliseconds", "frameType": "bars"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", "properties": {"timeUnit": "milliseconds", "frameType": "slate"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", "properties": {"timeUnit": "milliseconds", "frameType": "chyron"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimeFrame/v2", "properties": {"timeUnit": "milliseconds", "frameType": "credits"}}, {"@type": "http://mmif.clams.ai/vocabulary/TimePoint/v1", "properties": {"timeUnit": "milliseconds"}}], "parameters": [{"name": "startAt", "description": "Number of milliseconds into the video to start processing", "type": "integer", "default": 0, "multivalued": false}, {"name": "stopAt", "description": "Number of milliseconds into the video to stop processing", "type": "integer", "default": 10000000, "multivalued": false}, {"name": "sampleRate", "description": "Milliseconds between sampled frames", "type": "integer", "default": 1000, "multivalued": false}, {"name": "minFrameScore", "description": "Minimum score for a still frame to be included in a TimeFrame", "type": "number", "default": 0.01, "multivalued": false}, {"name": "minTimeframeScore", "description": "Minimum score for a TimeFrame", "type": "number", "default": 0.5, "multivalued": false}, {"name": "minFrameCount", "description": "Minimum number of sampled frames required for a TimeFrame", "type": "integer", "default": 2, "multivalued": false}, {"name": "pretty", "description": "The JSON body of the HTTP response will be re-formatted with 2-space indentation", "type": "boolean", "default": 0, "multivalued": false}]}] \ No newline at end of file
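Since the underlying catalog is just a flat JSON array of app records, each carrying a `name`, `identifier`, `app_version`, input/output `@type`s, and a `parameters` list, it can be queried directly without special tooling. Below is a minimal sketch; the filename `apps.json` is a placeholder for wherever the array in this diff is saved, and it assumes records appear in submission order (as the diff suggests, with swt-detection v4.0 appended at the end).

```python
import json

# Load the flat array of app records shown in the diff above.
# "apps.json" is a placeholder path, not a filename defined by this repo.
with open("apps.json") as f:
    records = json.load(f)

# An identifier looks like "http://apps.clams.ai/swt-detection/v4.0";
# everything before the last "/" names the app, the rest is the version.
latest = {}
for rec in records:
    app, _, _version = rec["identifier"].rpartition("/")
    latest[app] = rec  # later records overwrite earlier ones

# Print each app's newest version with its parameter defaults.
# Some early records (e.g. whisper-wrapper v1) have no "parameters" key.
for app, rec in sorted(latest.items()):
    defaults = {p["name"]: p.get("default") for p in rec.get("parameters", [])}
    print(f"{app} {rec['app_version']}: {defaults}")
```

For the scenes-with-text entry this would print the v4.0 record, including the `minTimeframeScore` default that moved from 0.25 to 0.5 between v3.0 and v4.0.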